From 40fa5d7f20d2c73c42b7e81181b6f35beec58218 Mon Sep 17 00:00:00 2001 From: Ilya Sherstyuk Date: Thu, 4 May 2023 10:09:56 -0700 Subject: [PATCH 1/4] Update Polygraphy to v0.47.1 Signed-off-by: Ilya Sherstyuk --- tools/Polygraphy/CHANGELOG.md | 80 ++++- tools/Polygraphy/Makefile | 4 +- tools/Polygraphy/bad.onnx | Bin 0 -> 545 bytes tools/Polygraphy/docs/conf.py | 2 +- .../build_and_run.py | 2 +- .../load_and_run.py | 2 +- .../api/01_comparing_frameworks/example.py | 2 +- .../api/02_validating_on_a_dataset/example.py | 2 +- .../example.py | 2 +- .../example.py | 2 +- .../05_using_tensorrt_network_api/example.py | 2 +- .../06_immediate_eval_api/build_and_run.py | 2 +- .../api/06_immediate_eval_api/load_and_run.py | 2 +- .../07_tensorrt_and_dynamic_shapes/example.py | 2 +- .../example.py | 2 +- .../data_loader.py | 2 +- .../create_config.py | 2 +- .../define_network.py | 2 +- .../data_loader.py | 2 +- .../generate_data.py | 2 +- .../08_adding_precision_constraints/README.md | 57 ++-- .../add_constraints.py | 3 +- .../constrained_network.py | 2 +- .../surgeon/04_setting_upper_bounds/README.md | 56 ++++ .../04_setting_upper_bounds/model.onnx | Bin 0 -> 1317 bytes .../surgeon/04_setting_upper_bounds/model.png | Bin 0 -> 103249 bytes .../04_setting_upper_bounds/modified.png | Bin 0 -> 110733 bytes .../dev/02_extending_polygraphy_run/README.md | 3 +- .../polygraphy_reshape_destroyer/__init__.py | 2 +- .../args/__init__.py | 2 +- .../args/loader.py | 7 +- .../args/runner.py | 2 +- .../backend/__init__.py | 2 +- .../backend/loader.py | 7 +- .../backend/runner.py | 6 +- .../polygraphy_reshape_destroyer/export.py | 2 +- .../extension_module/setup.py | 2 +- tools/Polygraphy/polygraphy/__init__.py | 2 +- .../polygraphy/backend/base/loader.py | 8 +- .../polygraphy/backend/base/runner.py | 7 +- .../polygraphy/backend/common/loader.py | 5 +- .../polygraphy/backend/onnx/loader.py | 147 ++++++++- .../polygraphy/backend/onnx/util.py | 82 ++++- .../polygraphy/backend/onnxrt/loader.py | 3 +- .../polygraphy/backend/onnxrt/runner.py | 6 +- .../backend/pluginref/references.py | 2 +- .../polygraphy/backend/pluginref/runner.py | 6 +- .../polygraphy/backend/pyt/runner.py | 6 +- .../polygraphy/backend/tf/loader.py | 11 +- .../polygraphy/backend/tf/runner.py | 6 +- .../Polygraphy/polygraphy/backend/tf/util.py | 2 +- .../backend/trt/algorithm_selector.py | 184 +++++++---- .../polygraphy/backend/trt/calibrator.py | 2 +- .../polygraphy/backend/trt/config.py | 44 ++- .../polygraphy/backend/trt/loader.py | 128 +++++++- .../polygraphy/backend/trt/profile.py | 2 +- .../polygraphy/backend/trt/runner.py | 40 ++- .../Polygraphy/polygraphy/backend/trt/util.py | 38 ++- .../polygraphy/backend/trt_legacy.py | 16 +- .../Polygraphy/polygraphy/common/interface.py | 2 +- tools/Polygraphy/polygraphy/common/struct.py | 9 +- .../polygraphy/comparator/comparator.py | 2 +- .../polygraphy/comparator/compare.py | 182 +++++++---- .../polygraphy/comparator/data_loader.py | 2 +- .../polygraphy/comparator/postprocess.py | 2 +- .../polygraphy/comparator/struct.py | 4 +- .../Polygraphy/polygraphy/comparator/util.py | 220 ++++++++++++- tools/Polygraphy/polygraphy/config.py | 2 +- tools/Polygraphy/polygraphy/constants.py | 2 +- tools/Polygraphy/polygraphy/cuda/cuda.py | 3 +- .../polygraphy/exception/exception.py | 2 +- tools/Polygraphy/polygraphy/func/func.py | 2 +- tools/Polygraphy/polygraphy/json/serde.py | 2 +- tools/Polygraphy/polygraphy/logger/logger.py | 5 +- tools/Polygraphy/polygraphy/mod/exporter.py | 2 +- 
tools/Polygraphy/polygraphy/mod/importer.py | 2 +- tools/Polygraphy/polygraphy/mod/util.py | 21 +- .../tools/args/backend/onnx/loader.py | 53 +++- .../tools/args/backend/onnxrt/loader.py | 2 +- .../tools/args/backend/onnxrt/runner.py | 2 +- .../tools/args/backend/pluginref/runner.py | 2 +- .../tools/args/backend/runner_select.py | 2 +- .../tools/args/backend/tf/config.py | 2 +- .../tools/args/backend/tf/loader.py | 8 +- .../tools/args/backend/tf/runner.py | 2 +- .../tools/args/backend/trt/config.py | 73 +++-- .../tools/args/backend/trt/helper.py | 28 ++ .../tools/args/backend/trt/loader.py | 236 ++++++++++++-- .../tools/args/backend/trt/runner.py | 5 +- .../tools/args/backend/trt_legacy.py | 10 +- .../Polygraphy/polygraphy/tools/args/base.py | 17 +- .../tools/args/comparator/comparator.py | 27 +- .../tools/args/comparator/compare.py | 44 ++- .../tools/args/comparator/data_loader.py | 2 +- .../tools/args/comparator/postprocess.py | 2 +- .../polygraphy/tools/args/logger/logger.py | 2 +- .../Polygraphy/polygraphy/tools/args/model.py | 9 +- .../polygraphy/tools/args/util/util.py | 30 +- .../Polygraphy/polygraphy/tools/base/tool.py | 2 +- .../polygraphy/tools/convert/convert.py | 28 +- .../Polygraphy/polygraphy/tools/data/data.py | 2 +- .../polygraphy/tools/data/subtool/to_input.py | 2 +- .../polygraphy/tools/debug/debug.py | 2 +- .../polygraphy/tools/debug/subtool/base.py | 20 +- .../polygraphy/tools/debug/subtool/build.py | 2 +- .../debug/subtool/iterative_debug_args.py | 2 +- .../tools/debug/subtool/precision.py | 8 +- .../polygraphy/tools/debug/subtool/reduce.py | 39 ++- .../polygraphy/tools/debug/subtool/repeat.py | 2 +- .../polygraphy/tools/inspect/inspect.py | 2 +- .../tools/inspect/subtool/capability.py | 2 +- .../polygraphy/tools/inspect/subtool/data.py | 2 +- .../tools/inspect/subtool/diff_tactics.py | 2 +- .../polygraphy/tools/inspect/subtool/model.py | 31 +- .../tools/inspect/subtool/tactics.py | 2 +- tools/Polygraphy/polygraphy/tools/registry.py | 6 +- tools/Polygraphy/polygraphy/tools/run/run.py | 12 +- tools/Polygraphy/polygraphy/tools/script.py | 13 +- .../polygraphy/tools/surgeon/subtool/base.py | 2 +- .../tools/surgeon/subtool/extract.py | 2 +- .../tools/surgeon/subtool/insert.py | 2 +- .../tools/surgeon/subtool/sanitize.py | 4 +- .../polygraphy/tools/surgeon/surgeon.py | 2 +- .../polygraphy/tools/template/subtool/base.py | 2 +- .../tools/template/subtool/onnx_gs.py | 2 +- .../tools/template/subtool/trt_config.py | 2 +- .../tools/template/subtool/trt_network.py | 4 +- .../polygraphy/tools/template/template.py | 2 +- tools/Polygraphy/polygraphy/tools/util.py | 2 +- tools/Polygraphy/polygraphy/util/format.py | 2 +- tools/Polygraphy/polygraphy/util/util.py | 58 +++- tools/Polygraphy/polygraphy_debug_replay.json | 14 + .../polygraphy_debug_replay_skip_current.json | 20 ++ tools/Polygraphy/reduced.onnx | Bin 0 -> 151 bytes tools/Polygraphy/setup.py | 2 +- .../tests/backend/base/test_loader.py | 2 +- .../tests/backend/base/test_runner.py | 2 +- .../tests/backend/common/test_loader.py | 2 +- .../tests/backend/onnx/test_loader.py | 50 ++- .../tests/backend/onnx/test_util.py | 14 +- .../tests/backend/onnxrt/test_loader.py | 2 +- .../tests/backend/onnxrt/test_runner.py | 8 +- .../tests/backend/pluginref/test_runner.py | 8 +- .../tests/backend/test_tensorrt_legacy.py | 2 +- .../tests/backend/tf/test_loader.py | 2 +- .../tests/backend/tf/test_runner.py | 8 +- .../backend/trt/test_algorithm_selector.py | 288 ++++++++++++------ .../tests/backend/trt/test_calibrator.py | 2 +- 
.../tests/backend/trt/test_config.py | 28 +- .../tests/backend/trt/test_loader.py | 89 ++++-- .../tests/backend/trt/test_profile.py | 2 +- .../tests/backend/trt/test_runner.py | 40 ++- .../Polygraphy/tests/backend/trt/test_util.py | 4 +- .../Polygraphy/tests/common/test_interface.py | 2 +- tools/Polygraphy/tests/common/test_struct.py | 2 +- .../tests/comparator/test_comparator.py | 2 +- .../tests/comparator/test_compare.py | 7 +- .../tests/comparator/test_data_loader.py | 2 +- .../tests/comparator/test_postprocess.py | 2 +- .../tests/comparator/test_struct.py | 2 +- tools/Polygraphy/tests/conftest.py | 97 +++++- tools/Polygraphy/tests/cuda/test_cuda.py | 2 +- tools/Polygraphy/tests/func/test_func.py | 2 +- tools/Polygraphy/tests/helper.py | 9 +- tools/Polygraphy/tests/logger/test_logger.py | 2 +- .../Polygraphy/tests/mod/test_dependencies.py | 4 +- tools/Polygraphy/tests/mod/test_exporter.py | 2 +- tools/Polygraphy/tests/mod/test_importer.py | 2 +- tools/Polygraphy/tests/mod/test_util.py | 37 +++ tools/Polygraphy/tests/models/I.onnx | Bin 0 -> 30743 bytes .../tests/models/identity_multi_ch.onnx | Bin 0 -> 93 bytes tools/Polygraphy/tests/models/make_models.py | 112 ++++++- tools/Polygraphy/tests/models/meta.py | 18 +- .../Polygraphy/tests/models/multi_output.onnx | Bin 0 -> 352 bytes .../tests/models/unbounded_dds.onnx | Bin 0 -> 1317 bytes .../tests/test_deprecated_aliases.py | 3 +- tools/Polygraphy/tests/test_examples.py | 3 +- tools/Polygraphy/tests/test_packaging.py | 2 +- tools/Polygraphy/tests/test_tests.py | 2 +- tools/Polygraphy/tests/test_ux.py | 4 +- .../tools/args/backend/onnx/test_loader.py | 35 ++- .../tools/args/backend/onnxrt/test_loader.py | 2 +- .../tools/args/backend/test_runner_select.py | 2 +- .../tools/args/backend/tf/test_loader.py | 2 +- .../tools/args/backend/trt/test_config.py | 33 +- .../tools/args/backend/trt/test_loader.py | 124 ++++++-- .../tools/args/backend/trt/test_runner.py | 8 +- .../tools/args/comparator/test_comparator.py | 42 ++- .../tools/args/comparator/test_compare.py | 39 ++- .../tools/args/comparator/test_data_loader.py | 2 +- tools/Polygraphy/tests/tools/args/helper.py | 2 +- .../tests/tools/args/logger/test_logger.py | 2 +- .../tests/tools/args/test_docstrings.py | 6 +- .../Polygraphy/tests/tools/args/test_model.py | 6 +- .../tests/tools/args/util/test_util.py | 2 +- tools/Polygraphy/tests/tools/conftest.py | 13 +- .../tests/tools/fake_reduce_checker.py | 2 +- tools/Polygraphy/tests/tools/test_convert.py | 2 +- tools/Polygraphy/tests/tools/test_data.py | 2 +- tools/Polygraphy/tests/tools/test_debug.py | 23 +- .../Polygraphy/tests/tools/test_deprecated.py | 2 +- tools/Polygraphy/tests/tools/test_inspect.py | 12 +- .../Polygraphy/tests/tools/test_polygraphy.py | 2 +- tools/Polygraphy/tests/tools/test_run.py | 2 +- tools/Polygraphy/tests/tools/test_script.py | 2 +- tools/Polygraphy/tests/tools/test_surgeon.py | 45 ++- tools/Polygraphy/tests/tools/test_template.py | 2 +- tools/Polygraphy/tests/util/test_format.py | 2 +- tools/Polygraphy/tests/util/test_serde.py | 24 +- tools/Polygraphy/tests/util/test_util.py | 32 +- 210 files changed, 3003 insertions(+), 689 deletions(-) create mode 100644 tools/Polygraphy/bad.onnx create mode 100644 tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/README.md create mode 100644 tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/model.onnx create mode 100644 tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/model.png create mode 100644 
tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/modified.png create mode 100644 tools/Polygraphy/polygraphy/tools/args/backend/trt/helper.py create mode 100644 tools/Polygraphy/polygraphy_debug_replay.json create mode 100644 tools/Polygraphy/polygraphy_debug_replay_skip_current.json create mode 100644 tools/Polygraphy/reduced.onnx create mode 100644 tools/Polygraphy/tests/mod/test_util.py create mode 100644 tools/Polygraphy/tests/models/I.onnx create mode 100644 tools/Polygraphy/tests/models/identity_multi_ch.onnx create mode 100644 tools/Polygraphy/tests/models/multi_output.onnx create mode 100644 tools/Polygraphy/tests/models/unbounded_dds.onnx diff --git a/tools/Polygraphy/CHANGELOG.md b/tools/Polygraphy/CHANGELOG.md index f0dd7823..3c4c6470 100644 --- a/tools/Polygraphy/CHANGELOG.md +++ b/tools/Polygraphy/CHANGELOG.md @@ -2,6 +2,82 @@ Dates are in YYYY-MM-DD format. + +## v0.47.1 (2023-03-29) +### Changed +- Updated `TrtOnnxFlagArgs` to automatically enable `NATIVE_INSTANCENORM` when either hardware or + version compatibility is enabled in the builder configuration. +- Downgraded errors for extra layers/tensors in `SetLayerPrecisions`, `SetTensorDatatypes`, and `SetTensorFormats` to warnings. + + +## v0.47.0 (2023-03-28) +### Added +- Added experimental support for error heatmaps. These can be visualized and/or saved with the + `--show-heatmaps`/`--save-heatmaps` command-line options or `show_heatmaps`/`save_heatmaps` + arguments to `CompareFunc.simple`. +- Added experimental `--show-error-metrics-plot/--save-error-metrics-plot` command-line options and + corresponding `show_error_metrics_plot`/`save_error_metrics_plot` arguments to `CompareFunc.simple`. + These allow you to generate plots of error vs. magnitude. +- Added `--version-compatible` flag for building version-compatible engines. + Note that for building version compatible engines for ONNX models, `--onnx-flags native_instancenorm` must also be provided. +- Added `TrtSaveEngineBytesArgs` and `TrtLoadEngineBytesArgs` to allow for avoiding engine deserialization until necessary. +- Added an `exclude_lean_runtime` parameter to `CreateConfig` and corresponding `--exclude-lean-runtime` CLI option. +- Added a `runtime` parameter to `EngineFromBytes` and `EngineFromNetwork` to enable deserializing plans with a custom runtime. +- Added a `LoadRuntime` TensorRT loader that can be used to load a runtime from a path and a corresponding `--load-runtime` CLI option. + +### Changed +- Updated Polygraphy to warn when it detects unsupported TensorRT and NumPy version combinations. +- `TrtSaveEngineArgs` and `TrtLoadEngineArgs` now depend on `TrtSaveEngineBytesArgs` and `TrtLoadEngineBytesArgs` respectively. + Additionally, all command-line options have been migrated to the latter argument groups. + +### Fixed +- Fixed a bug in `debug precision` where Polygraphy would attempt to set the layer precision for layers producing + non-activation outputs, which is an error in TensorRT. + + +## v0.46.2 (2023-02-28) +### Fixed +- Fixed minor formatting issues in help text + + +## v0.46.1 (2023-02-27) +### Changed +- `CompareFunc.simple` will now add a small epsilon when computing relative error to avoid Inf/NaNs. + + +## v0.46.0 (2023-02-10) +### Changed +- `polygraphy run` will now print warnings when command-line options are provided for comparison + functions types other than the current one specified by `--compare-func`. +- Added a `TensorInfo` class to the TensorRT backend to track information from `IAlgorithmIOInfo`. 
+ The `Algorithm` class now keeps `TensorInfo`s instead of tuples. +- Changed the format of tactic replay files to include more information about tensor formats where possible. + *NOTE: This means that tactic replay files generated with previous versions of Polygraphy are not compatible* + *with this version!* + +### Fixed +- Fixed a bug where the `--trt-legacy` runner would not work with `--input-shapes` specified. +- Fixed a bug where `debug reduce` would not work correctly for models where a node had multiple outputs + which were also graph outputs. See the comment in [`reduce.py`](./polygraphy/tools/debug/subtool/reduce.py) for details. + + +## v0.45.3 (2023-01-25) +### Changed +- Updated comparison functions so that the output array is now displayed *in addition* to the histogram + rather than instead of it. + + +## v0.45.2 (2023-01-25) +### Changed +- Updated comparison functions to display the entire output array instead of a histogram if it is small enough. + + +## v0.45.1 (2023-01-19) +### Added +- Added `max_aux_streams` to `CreateConfig` for TensorRT and corresponding `--max-aux-streams` command-line option. +- Added support for HWC I/O formats in `TrtRunner` for TensorRT 8.6+. + + ## v0.45.0 (2023-01-12) ### Added - Added an `-n/--num-items` option to `inspect data` to control how many elements of an array are shown. @@ -1124,8 +1200,8 @@ Dates are in YYYY-MM-DD format. - Added a passthrough loader, `LoadPlugins`, that can wrap any other loader, and load plugins ### Changed -- `EngineFromNetwork` will no longer free the the builder, network and parser if they are provided directly (as opposed to via a loader). -- `TrtRunner` will no longer free the the engine if it is provided directly (as opposed to via a loader). +- `EngineFromNetwork` will no longer free the builder, network and parser if they are provided directly (as opposed to via a loader). +- `TrtRunner` will no longer free the engine if it is provided directly (as opposed to via a loader). - All file saving arguments now take file paths instead of directories. This makes it easier to know exactly where each file is being written. - `compare_func` in `Comparator.compare_accuracy` now accepts a function that returns anything convertible to a boolean, rather than requiring a boolean. - `basic_compare_func` now will return information about required tolerances after `Comparator.compare_accuracy`. diff --git a/tools/Polygraphy/Makefile b/tools/Polygraphy/Makefile index 33995637..39130aa3 100644 --- a/tools/Polygraphy/Makefile +++ b/tools/Polygraphy/Makefile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,7 @@ leak_check: clean: rm -rf dist/ $(BUILD_DIR)/ polygraphy.egg-info/ -build: +build: clean python3 setup.py bdist_wheel install_deps: build diff --git a/tools/Polygraphy/bad.onnx b/tools/Polygraphy/bad.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ad84a9f4eb7dfb4ad212376a7e11e5d0ec4c7cf5 GIT binary patch literal 545 zcmdT1VIahpnUb1Ul37w2pI=%MZy+U+pO;q=pI($$kWpM(l%ATO z7oV4(k{WNQ#Ni253DOyfuG0voPGcmUUR-GQq4AA`I1^J+z)ms2X`rbRvtvpMmm?QK zEt)sXaOyQz;_}QZOU%hkQOGagvd3+N1x_O@5&jVp#|$lS&_xM=1DA`7gM(3sg^P)U s5hMZ(G$fHE88pwE;gz*Omd!%fi()c{tPzS{ab#a3Imn5Hi$Q=J07H77$^ZZW literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/docs/conf.py b/tools/Polygraphy/docs/conf.py index 3913f66d..58c88f92 100644 --- a/tools/Polygraphy/docs/conf.py +++ b/tools/Polygraphy/docs/conf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/00_inference_with_tensorrt/build_and_run.py b/tools/Polygraphy/examples/api/00_inference_with_tensorrt/build_and_run.py index 9926f322..18667f22 100644 --- a/tools/Polygraphy/examples/api/00_inference_with_tensorrt/build_and_run.py +++ b/tools/Polygraphy/examples/api/00_inference_with_tensorrt/build_and_run.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py b/tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py index e6a2a454..3ba4c0db 100644 --- a/tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py +++ b/tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/01_comparing_frameworks/example.py b/tools/Polygraphy/examples/api/01_comparing_frameworks/example.py index 70dcf158..2503cb03 100644 --- a/tools/Polygraphy/examples/api/01_comparing_frameworks/example.py +++ b/tools/Polygraphy/examples/api/01_comparing_frameworks/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/02_validating_on_a_dataset/example.py b/tools/Polygraphy/examples/api/02_validating_on_a_dataset/example.py index 577ccd26..f07bc26f 100644 --- a/tools/Polygraphy/examples/api/02_validating_on_a_dataset/example.py +++ b/tools/Polygraphy/examples/api/02_validating_on_a_dataset/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/03_interoperating_with_tensorrt/example.py b/tools/Polygraphy/examples/api/03_interoperating_with_tensorrt/example.py index 03b6276f..9658dba0 100644 --- a/tools/Polygraphy/examples/api/03_interoperating_with_tensorrt/example.py +++ b/tools/Polygraphy/examples/api/03_interoperating_with_tensorrt/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/04_int8_calibration_in_tensorrt/example.py b/tools/Polygraphy/examples/api/04_int8_calibration_in_tensorrt/example.py index 3af08b27..ed03d9d3 100644 --- a/tools/Polygraphy/examples/api/04_int8_calibration_in_tensorrt/example.py +++ b/tools/Polygraphy/examples/api/04_int8_calibration_in_tensorrt/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/05_using_tensorrt_network_api/example.py b/tools/Polygraphy/examples/api/05_using_tensorrt_network_api/example.py index 6fb4ea1d..91a45235 100644 --- a/tools/Polygraphy/examples/api/05_using_tensorrt_network_api/example.py +++ b/tools/Polygraphy/examples/api/05_using_tensorrt_network_api/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/06_immediate_eval_api/build_and_run.py b/tools/Polygraphy/examples/api/06_immediate_eval_api/build_and_run.py index 74004d61..f355a63d 100644 --- a/tools/Polygraphy/examples/api/06_immediate_eval_api/build_and_run.py +++ b/tools/Polygraphy/examples/api/06_immediate_eval_api/build_and_run.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/06_immediate_eval_api/load_and_run.py b/tools/Polygraphy/examples/api/06_immediate_eval_api/load_and_run.py index 9902a53f..0971073e 100644 --- a/tools/Polygraphy/examples/api/06_immediate_eval_api/load_and_run.py +++ b/tools/Polygraphy/examples/api/06_immediate_eval_api/load_and_run.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/07_tensorrt_and_dynamic_shapes/example.py b/tools/Polygraphy/examples/api/07_tensorrt_and_dynamic_shapes/example.py index 7add2703..771bbc53 100644 --- a/tools/Polygraphy/examples/api/07_tensorrt_and_dynamic_shapes/example.py +++ b/tools/Polygraphy/examples/api/07_tensorrt_and_dynamic_shapes/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/api/08_working_with_run_results_and_saved_inputs_manually/example.py b/tools/Polygraphy/examples/api/08_working_with_run_results_and_saved_inputs_manually/example.py index 81ec96b7..6c1f3073 100644 --- a/tools/Polygraphy/examples/api/08_working_with_run_results_and_saved_inputs_manually/example.py +++ b/tools/Polygraphy/examples/api/08_working_with_run_results_and_saved_inputs_manually/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/convert/01_int8_calibration_in_tensorrt/data_loader.py b/tools/Polygraphy/examples/cli/convert/01_int8_calibration_in_tensorrt/data_loader.py index 6de6821d..ff8c45e8 100644 --- a/tools/Polygraphy/examples/cli/convert/01_int8_calibration_in_tensorrt/data_loader.py +++ b/tools/Polygraphy/examples/cli/convert/01_int8_calibration_in_tensorrt/data_loader.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/create_config.py b/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/create_config.py index 25215337..eeec81db 100644 --- a/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/create_config.py +++ b/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/create_config.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/define_network.py b/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/define_network.py index 7be7342a..10e2d9e5 100755 --- a/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/define_network.py +++ b/tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/define_network.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py b/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py index e5651c95..1d025ef5 100644 --- a/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py +++ b/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/run/06_comparing_with_custom_output_data/generate_data.py b/tools/Polygraphy/examples/cli/run/06_comparing_with_custom_output_data/generate_data.py index 072d2312..df44eaa5 100644 --- a/tools/Polygraphy/examples/cli/run/06_comparing_with_custom_output_data/generate_data.py +++ b/tools/Polygraphy/examples/cli/run/06_comparing_with_custom_output_data/generate_data.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/README.md b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/README.md index be207c8b..f5d2e5df 100644 --- a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/README.md +++ b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/README.md @@ -64,11 +64,38 @@ we'll force the `Add` to run in FP16 precision and the subsequent `Sub` to run i This will prevent them from being fused and cause the outputs of `Add` to overflow the FP16 range. +### Using a Network Postprocessing Script to Constrain Precisions + +Another option is to use a TensorRT network postprocessing script to apply precisions on the parsed network. + +Use the provided network postprocessing script [add_constraints.py](./add_constraints.py) to constrain precisions in the model: + +``` +polygraphy run needs_constraints.onnx --onnxrt --trt --fp16 --precision-constraints obey \ + --val-range x:[1,2] --check-error-stat median \ + --trt-network-postprocess-script ./add_constraints.py +``` + +*TIP: You can use `--trt-npps` as shorthand for `--trt-network-postprocess-script`.* + +By default Polygraphy looks for a function called `postprocess` in the script to execute. To specify +a different function to use, suffix the script name with a colon followed by the function name, e.g. + + +``` +polygraphy run ... --trt-npps my_script.py:custom_func +``` + + + ### Using A Network Loader Script To Constrain Precisions +Alternatively, you can use a network loader script to define the entire network manually, +as a part of which you can set layer precisions. + The below section assumes you have read through the example on [Defining a TensorRT Network or Config Manually](../../../../examples/cli/run/04_defining_a_tensorrt_network_or_config_manually) -and have a basic understanding of how to use the [TensorRT Python API](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/). +and have a basic understanding of how to use the [TensorRT Python API](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html). First, run ONNX-Runtime on the model to generate reference inputs and golden outputs: @@ -101,32 +128,6 @@ polygraphy run constrained_network.py --precision-constraints prefer \ --check-error-stat median ``` -### Using a Network Postprocessing Script to Constrain Precisions - -Another option is to use a TensorRT network postprocessing script to apply precisions on the parsed network. This allows -direct comparison of the constrained network with ONNX-Runtime in a single Polygraphy run, without the need to save and load -reference data. - -Use the provided network postprocessing script [add_constraints.py](./add_constraints.py) to constrain precisions in the model: - - -``` -polygraphy run needs_constraints.onnx --onnxrt --trt --fp16 --precision-constraints obey \ - --val-range x:[1,2] --check-error-stat median \ - --trt-network-postprocess-script ./add_constraints.py -``` - -*TIP: You can use `--trt-npps` as shorthand for `--trt-network-postprocess-script`.* - -By default Polygraphy looks for a function called `postprocess` in the script to execute. To specify -a different function to use, suffix the script name with a colon followed by the function name, e.g. - - -``` -polygraphy run ... --trt-npps my_script.py:custom_func -``` - - ## See Also @@ -134,4 +135,4 @@ polygraphy run ... 
--trt-npps my_script.py:custom_func
 reduced precision optimizations using Polygraphy.
 * [Defining a TensorRT Network or Config Manually](../../../../examples/cli/run/04_defining_a_tensorrt_network_or_config_manually)
   for instructions on how to create network script templates.
-* [TensorRT Python API Reference](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/)
+* [TensorRT Python API Reference](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html)
diff --git a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/add_constraints.py b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/add_constraints.py
index ad287e91..fac9de5f 100755
--- a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/add_constraints.py
+++ b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/add_constraints.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,6 +22,7 @@
 
 import tensorrt as trt
 
+
 def postprocess(network):
     """
     Traverses the parsed network and constrains precisions
diff --git a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/constrained_network.py b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/constrained_network.py
index 591149fd..2a420031 100755
--- a/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/constrained_network.py
+++ b/tools/Polygraphy/examples/cli/run/08_adding_precision_constraints/constrained_network.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/README.md b/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/README.md
new file mode 100644
index 00000000..49e3f37c
--- /dev/null
+++ b/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/README.md
@@ -0,0 +1,56 @@
+# Using Sanitize To Set Upper Bounds For Unbounded Data-Dependent Shapes (DDS)
+
+
+## Introduction
+
+The `surgeon sanitize` subtool can be used to set upper bounds for unbounded Data-Dependent Shapes (DDS).
+When the shape of a tensor depends on the runtime value of another tensor, that shape is called a DDS.
+Some DDS have a limited upper bound. For example, the output shape of a `NonZero` operator is a DDS, but it will never exceed the shape of the operator's input.
+Other DDS have no upper bound. For example, the output of a `Range` operator is an unbounded DDS when its `limit` input is a runtime tensor.
+Tensors with unbounded DDS make it difficult for TensorRT to optimize inference performance and memory usage at build time.
+In the worst case, they can cause TensorRT engine building failures.
+
+In this example, we'll use Polygraphy to set an upper bound for an unbounded DDS in a graph:
+
+![./model.png](./model.png)
+
+
+## Running The Example
+
+1. Run constant folding on the model first:
+
+    ```bash
+    polygraphy surgeon sanitize model.onnx -o folded.onnx --fold-constants
+    ```
+
+    Note that constant folding and symbolic shape inference are required before unbounded DDS can be listed and upper bounds can be set.
+
+2. Find tensors with unbounded DDS:
+
+    ```bash
+    polygraphy inspect model folded.onnx --list-unbounded-dds
+    ```
+
+    Polygraphy will list all tensors with unbounded DDS.
+
+3. Set upper bounds for the unbounded DDS:
+
+    ```bash
+    polygraphy surgeon sanitize folded.onnx --set-unbounded-dds-upper-bound 1000 -o modified.onnx
+    ```
+
+    Polygraphy will first search for all tensors with unbounded DDS.
+    It will then insert `Min` operators with the provided upper bound to limit the size of those tensors (see the sketch below for what this rewrite looks like).
+    In this example, a `Min` operator is inserted before the `Range` operator.
+    With the modified model, TensorRT knows that the output shape of the `Range` operator will not exceed 1000, so more kernels can be selected for the subsequent layers.
+
+    ![./modified.png](./modified.png)
+
+4. Check that no tensors with unbounded DDS remain:
+
+    ```bash
+    polygraphy inspect model modified.onnx --list-unbounded-dds
+    ```
+
+    `modified.onnx` should no longer contain any unbounded DDS.
\ No newline at end of file
diff --git a/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/model.onnx b/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..259c4b5c00e8462f9a1665378b570e618a452545
GIT binary patch
literal 1317
[base85-encoded binary patch data omitted]
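To make step 3 of the example above concrete, here is a minimal sketch (not part of the patch itself) of the kind of rewrite `--set-unbounded-dds-upper-bound` performs, written with ONNX GraphSurgeon. The `Range` lookup, the `int64` dtype of the bound, and the file names are assumptions for illustration; the tool itself locates unbounded DDS tensors automatically.

```python
import numpy as np
import onnx
import onnx_graphsurgeon as gs

graph = gs.import_onnx(onnx.load("folded.onnx"))

# Assume the unbounded DDS comes from the `limit` input (index 1) of a Range node.
range_node = next(node for node in graph.nodes if node.op == "Range")
limit = range_node.inputs[1]

# Clamp the limit to 1000 by routing it through a Min node.
upper_bound = gs.Constant("dds_upper_bound", np.array(1000, dtype=np.int64))
clamped_limit = gs.Variable("clamped_limit", dtype=limit.dtype, shape=limit.shape)
graph.nodes.append(gs.Node(op="Min", inputs=[limit, upper_bound], outputs=[clamped_limit]))
range_node.inputs[1] = clamped_limit

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "modified_manual.onnx")
```

In the example itself, `polygraphy surgeon sanitize --set-unbounded-dds-upper-bound 1000` applies this kind of clamp for you; the script above only illustrates what the modified graph contains.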
zsjuCY;E6^q5o$r`K4HAFQ%;rx!92!g6FSAGtKwrFNLMn|)>hiBgYd_#OF2VXLXwJ~ zgY(_Kk@S|9oV{M%{$5-e@ipaF`&<)!vG$@7p<_DS{gY$#>SBvY=Z|^#@bra$FGTSz zfC`%qDVE5=AEUd3ID|t$r3)lE`ZP<`hDhY2YaB;_abt3 zc9!KP$?*&g-Hxi41CaWS4-yyM(s-@>I0lRYYFQJ|u~#gsbsRs~k3>C{(-2&*;L~|h zxaR#*6X>8lV&C2CZ}_O;LTqzapioWr(s({<1AsvtFHtr+>D=V`4H1`c7~6~Yncbh^edS=Rwl(b zX@VU+?7bJ8)yX)XWQMafIBo@8_G(Ds-F9bZyc`PBaG7PRsgQ|r ziC?KAigJqF0}!w!LYE=UDlwgq4($a}%^o(YPzekS5B?a5L;@P8bncIL@^BWYbLWOX z5;@A)kExGHI+{fv^S=TX7HcUukZS-Pbk2d0LYFBY2Vfn^fN2`q<%-hUW!QDA7muwM zYIPFYuQjX5Ul@)iQ2JTweST*lbPllH)e|zJXb=)LY8WhC!ZuqKzq0FD&gLS$vs{_D z)dc7O7!>-A3If_rS%)spa>HBxd?zS2)sdX}+%Y=sE8GeCBW+nrI{_s(57mx0VJ_b|v(A07LXx&2;PptaF9 z=r|a*c1V2%P0<1SOoEHLT$zQv_YqJ(AOzG*4cnE?OcKwG<)()71F5@*zX?vUK(ub-ifJ@t;L@97+NiSp_wUUKQ4Ibn4z zC;Gcs5WYcA>6qWngA&GJxuxwcT?f%A`~JLF&)VyOwoV_G-#R)pr0TnD2&9~_Sw4)W zyp@(}$Z|`JMe5<<1`o{2Jc8443vpx3-yhUD=adhPESa8F$%IN=NsVp&DkQhKGW{og zYQgxtEb9ioLd$cRLbTmBfjopN+SJ^m=ikDtA6?xAnXMMdCjv27wkJ(8G^R9^t#h9YmTP4ZK@t}_rrrSr1xK; zvG^(a*8pXC`->{0bd(d3sUdn^2cNecm}50{b%v9n0)UpE1k0HNouaW#LXi}XO;bo} zNS%zb&!7)1Fq3@0^1=Ct*lmbS81;2wF|7Txh01GCopy&T+*?c4ypcQ_TrGv zqDr9A>=+BRHD+=rG^j2%s((Y{DeWx!DC_O<=haa)yCYYwv z9@UI_gqqD>XWim)8-A)T9IO;P=R<38JF$1T`tiA(px2<>BD(>vY+-yl$McH}yu0SB z0B?-pR!-e}H)K{SHx|*>BbA)Gdo}?Zqx_EyhWGSwFdy9i*xt!|K;@|JpWAzF1iuS2 z{JB08mW4OJ{f%lw0;%_8@3GGQ?B^ihhQ_)2t>L>lJwW@t6zIsd)j!|Ilu7|4qW28? zSl7VZo(fC7192cR0vfoXK*a}2J0Sg_52d4f+xz;)=LDN^OcE{)0tZUCmFIq^huDA< zgcfrH#&0KG0gZ!c{|AGBOg{>-EBf5POZC$_<~lGSy)cz7a2#yWIc=4J(%H7sbs7TV8;d0a>8Og8uANb?WIJ z`?6>o*DQ{3ow z{2^#LS<8PmUIdhkcSN26P|Pz{!Y$?D)FQ#DzEojRaQedlkS|dzv)s#u{{Md%Ei^q-(^qzE&W%HcB>;zsiV{z+}u(zwgOT zdwIy=S)4YqR6BnU5&+&7LzBzxZteN0m+~@~e;4MR+)w>PO;cvX@}$mEpQgwjFtelU zk}pd=8QY!R2r%VS{?q5a<_FimPYN)b*lj~4X3zO+q1_ciPEvpnlnvBDW0p1n4shK> z<84UiNL~KQ}5um+|_cbHAtf?)9^oZ__r$po27c z4MhH?JShXoR?>YbDxoJ0@oiqQO#EpVzP@ax`lsr=)*-GG$#m3BZxQRC=Q&r zrz)msb(YnDoh%Ay)AYmA}QfJw!!?qBCTK}Ez03IZ02Na#(vh>8%3f> zlu#rz=_(KiRR~4;_e{XD)_%{o-+ivL_bJy|f7azBbIxZzWt4l2agRdWqi8FSAVr&k z7b#7N(N1#H0NI=939C$}M}v*5mkQsC_u+P2G}snC$(RPXwJ&^1A6tdcZ~jY=%!bOg zG35JtZjLfxODtjmjK_+LZnhOEuPf{zH?9o7;L7dyB6v z)A+xSGZtg8v8P9uw2{2~*1H-^A7Z}-({9PeuQw1CwATD>~V+u8lpAK^G@X9{Q-u3~DOx^F89CDZb)&4CczBKK` z+|Xf{PyO?;f_EZfbQ`D%f`eHf7x{6Tf&3m;YnB#8(=#jjd27u5-@C;)+K%cS7r-{} zM*~dN&WUoVuU|m%()Cz?lV<5JxQs4TDwYa6Rmv*t?p%-KTcb33+%dU3c5e>RwKnE7 z-eH1zd1XI3aM?=t77H^7FpaH5HVymfu(Yx;f9A(@JLa$FL8rIuC>LGj6q;~bya zm-GgI%Y=a3@$X7Cws0Ph6fU=*Vb&$3v2R?GZ>8>dqT@`qX}8R zOiHZ$Az#M&C@D5bjAWF;O}(<;b>(p6LtA@emVGZ$%6j^HgQt0PvPF#LBuQfY?DW>> zkXgqg`g}=xE~ibnQI9|Gb} zH5@s=BV;>@AyRW}1M;vMMQSBdavO_P{{A zjA7x&DaMH-G2!C}ST#F36sK+&u17!a=yy?dmENN%)9<}rOaI9qONA)TeE8bc`S~61 zD$kD>d$jrCu2g2aWs|hc$L}aq=2q0vK4mtV(w1Q{66i;jk~uFhTlW`QokLoN&*P^j z8rC!M^Ct$#yEg<5!Zqc?=utGF2yshlh}(L;mvU-nnEQyy zUTtr-ziD{&^D#g1{lk6(2l;s~;1TT~rK)h5nwiCU@5)okvWu}i1B~Udy(gM%UdQ-G zua?!O7Bn;ZD+ilkb^;A}9lmweR+iUwvq?@6~np9^0ZNYfvVru;o4V*lO{Vd#a0_e9TAb zxRX8Cyl{-NgY3vzL=O1oHwu^VWQzI9@?iD?Z8Hg-x}m2PY(l;;%Q6Vzbbp#`|AkoE zPut-qa03PQ>W_FB$ItNDSMU1?OF{7M44|UoK$>d(1jDxRh5s>>@C0b;0-w>}<%gA^ zhM8R226aAMVRWxB5=K&c!(d{YP)1VBaSTWTKM(>DM76tg8m9EbWK!Kv*o00%s?F8+ zN!(CgJ_P$HNeUi`DL4wjL{-}`5hu)w2jWzNAQ-@@1|=%}M0WukX=ax}XXN_c>EQc&uj_8!+(bJAh z3Dia{dA&5uv1f1iC0ib*9FnM%V{GG@FP}K!}v5Oc$ ziVmUGfugq9{2)n2c=7(Y7qe2bfIMSEpXPqj&&z;~7cA z0f4cs?c*Q~_1uWvu0v+wCIQ;OaN(X`Jw}>{BDh3?CiRv~% znE@8h4%_E9vgac-zfN@JbqMAZ73tWUG4mAt9L9xR$j{RiFI(tCE%mMUdMWHClqPCL zySw)thBs#4p+lE9d-*=_W$tn)eq`T8eTZP4?gyo+h3OA~K34}UNn$sS+N=%V933Cm zG+rCip?SfJ%)SWm71$qPeXg)Q=Hivg=CQjS+3Odxa9D?DQ=X%p{dmSN$)MV-*f)R5 zn$lVGwF&9J_N=_IrFb4{reaHbDTxD_WoJhSk{b_Xj2XUM{rN4%#2r}oqq0uX=U$o* 
zm>!GsSw^{lzP>xmKt|!6wdHw_=dajHR+i51HjzJ{$%N0?GZr>*!sEhzyGMHtj6`@$GqW4(P4a zx44nFSl)7@eX+bEXnFMemkyWa;;f#$F~8e7p+`zz8x&+Jhg+Nv(R^o$HEI|>ez_|5 zSvTTr3VWM!e)o6E)4~US*A9qXe%bxX`nk4%48zy!G|w+`%X6R}U%u@3B;+i&uWZGV zSKgv|bCVlqsJUhHgbc1z4;PMEvtO`z9pjCDjtSsgp!Wc*3eI-Pt+Q zv_TTDz#tbJFv3czWUhEG(`0MmG`Lae1wr)|&v+h>MP|2UKmvQKaD={&hMrZVxo`1P zd%H#QIKKX^Ze9Z=DvC+MbHT>6Oa*{vLr*Tzp~rYKtSv&Q3kb;{yLlBRvDPE}(L=M2 zbdFe!lI8GQ6h&rudln0?&}43<1%p`re&gE1@DkSo!rV9~hTLjz82RH4%ONNGBtI-8 z-L0uo&#B@%D$&JaW|GKX#X-~f@vQ1qr~*U9DL&{Z+@*&Q!LKgu)>mj74LAcuW_sCv z^xBS=d-zR(`kDqO-)t{X7IrwOZ_PD+T$WAFr&oHNV7zW>M2L+53Cs6auYUk?#)17T z3taTOYOG{?zJK?1JSD#j%0~=E%Q<{|Bn=$B+3!{KgpZ+Ip`^5DD;Es2+veweC#Ixv z-5Sg4r!+uQwUB#nm^-ubear z*J~wfX8|X$vX|&8DE>*dnQ?AN^p$=S7o7R7dEU2IZha=`NHcH)r>ecYCv2I^?)Z(; zB>}?GRC?JTuiaVt7~S3`i6h5c^DSH=+$#1hJ|SOu&VZ@yFy#CFqQct%8&chcVa7u# zsL^Doe0d0jU~Nq$=uAN_Z@K&11hoyt%y$%*(nP+*o)%p9TEq?_Y=xW>AGNgX1(XCS zmu^2M@g%n06>slRvr=wePfZe~PoLt>_v84e1Otyry@(^hc4FODwCjE|4&4lVxx z6p)qM^g>f7F1w((?e4o}T>KOp^f1n2{aDN^ZA|D{Z z|LPwJv}lhr`znPN#{@gf=wOvVS62%#<4w{I!kuueX0ScLhy`2Z6E$IW*3tUZ`*sdFkrwTpL-dOzuee( zeFUC!ko}Jzw=B@G3bGxB?}8GcFqg8cb8fHf3W}uv{q%3h{Iv&)4Ns^DAXL2bV)8+_ z1H60vud^hD-^01UVC(zk5Cx=V!GMAKikk{?@_C`$_oqv*zz1!O-(u#%NN#%42=6=v zo0N3f(tG4%5*hQ)m-+XxVto4s(PV`te*R#kBH0u4!Tly(+@3EbgWPII+DpG+#B;%u zBlTAn3NZ9L%i>VBJPb;@UV2@f=Q&(2xG$Lo|9<7?$rn!oB?)t1IcX5M8QYs;y2&XW z;DhT({i=X(V-PR zlcxMNy4D}qFYf`9Jcx9@GcK$2d8M3GOcbJPS% z)8MOJ7w-UcH+G8Fd91x06=bY+rl--&mVSgQ+1|cXFUplpmUW+tp@yT@GhCK0Mn`@7o>iBI zgb1Mw1~})nLb2b-Wqz%L38@KKt+8l5iewG_{iF84b z2DP1{@p16vJ99A z*yzpkKd2G@(eS3>^?}*Xu{*Z}iEpYO-WacHKntQ( zvMq}NCo8853*|GW(?b#Cp4Bt$o9Of1!D|`g?N{l5lT|D2c;(lVDuC-MD(RBu7gjYN zGr{PZNsL~r%uO4pp zMG0lPE>P6=@ol@Of1I_Mgwx##Gs9I;sj^mDX(ax%vTd37IV!c66>2pN#mNO=^Z3U$ z;T>Fzd+W-WGa5_#11S~@y8?;tOh1GT?iKx9ThuZpg{gVOuYR9-znR~P39HoU){O!{ z-0fAU8D&o#w&LngcNIA0F|c^Wa)oZDu9z0RmuBHWdF^uroH0Da+3`h>UV45wOyDpT z;ZSd$*gB18h;(t5A)4Q{oV1|QDu`}nmW&y0r6Rn;&bWj;Osq+FoIXwPR0=;8o=PgN}YbvZQeP%{%2i!cI~$xFXDuYD zqiy=jLMSt#lCxuPP^a|=>9fZyB^G5QC&wlmn<#AuXwfCw_5F)9i&j0)x-8=sFEBg@ z67eeA^@XWl(A&=rYnx{xCzXkTcx+>uo4cT`nP=i_-0psw0r_9+Pb>r30~Ocy$?BFUg=@GSM7BzdoICP%KC2JWBetJ z#Tyl@H1}`I|Di%;_Z6hgWvryG?1Qxcb!PTc*~r}9b=<#@sI?eAgZ{@~M&DhLxlzmT zZC?w}(v)@?*kxeZ{eInRy4P_h&1fASr}jUO3B3n<1HxK$Z`S2EMAl3EBJKC0St0#n z4}H{H<{{H^cBBWSw92=a2vTDRU`W6|i2tFY$hSl)HTW0v%<2wEfUGuMosHvA_lOP) zCk}w|d;FpBuuoL{hUtBODCDhzgfk0J&g3{*&{+>06n@ZhxR5VC0H6iIQ0Fp9{%~Lo zkN!CutU`OyOlQJNrtkzw-?w&MxmOOb$=bw5C<9w(?$p*E+8Q8+ijbAXCvo@7eV4d# zkO{XN>Tl7NydM1S+cTcSlsO(}w|)?+xRVu~_JBwLU93Y2|MgP<-#0;lUdB9jBwgMC zR1J}4KvJQ3?JYcIE~lSYk!`$l0Lm$JS25cQ1M!TyHbBpIUF7LJJVS{UxmA;}@ z_ErJajF8y{Hg|u4!xb~>>#Cx1CCzhOk8{_>$FS@3_fARmo^R*vF?ZA|%g?kI*pc0- zo4hithw*AX(}i~^!1Uu<>5G)Qc-8JvP;@>o*G;;7v%6HBG7SWe?{x7kZAE0+i?s63 zsyR6q4R?EZdM&FE$|p~H8CNBnX>)9@Rl4L=4fSs`gK>?MMk0p3V+DN2rjH|Hr_$xM zQpV>6!hvRV(7tf-5CHrj`)<0Oc~09%^Dup+Ul3!lJi;%gXEv>_JIS1^O?fvI6|T@a zD4)Gt)1NRRyOz+*Cg2hU{1r-fCv|kK@HD-@iks0TE^~ad=I%~vipQOrZ1=G@2)>Y9 zsqVKF0nfv58FMB$EvTLRwW}ASj#hu$ku}@Z^kc9*K_D$~_^Q3hM0iM#{-B)MjESyo zw^}DD);n(4Un;6rBfjo1g{ICjDmlNW6ItuhsZ``$;O=n6%SU%MU#dRI{EV?5&FMNUZnvo0=|wN&Qo5Eb1}P>jg6FADR5p z&y^B&7lY(vg^$Sgp2}mp{fcyKe&&>zgdqRIwZ~y97#>?(LHz``TH=LzL9}IWqW_g3 zNy9pOkCgTb+0Ri6_o=h>KrBTbGPc24k8emH~Kmk zK$FMm9jEqfAAl4mSEYcMIkmG$Q#3f&7aWJ4?zNu6bU%JOl{m!%yG*++T}uR8aK&cc z<=@H9w^3W_+_x9QKs+4}l@E)5GzF&+uF0;2WB?d#AX&cW=Y{{=Ir=~4QT?~y zM0yIu{YcAqldhJQuhKhV*S4KO%vlj6pMGJ3^a3>ju*XKlZ=ubQ?4+910Zk+ zK3@z>LXc7cJVXt(17Ii6g+5Y-eSos>{B=XrUe5R{+w$*#5x2a~>;}R9j>9k$_DwHR zA>P$ACo;dNh%tX8-nYC(4EL(d%zfy?p^*idn zbgXrCbx{w4#P0XcJdc#&fA?jl(+fs8^2+U9qYiuDn^xCb=64V_ 
zwC%tHqpit#BA$2EWU9^Dek_OPzRA`&wX%n-zkhM5IPmid2ps4EWv~SWAhN6T0TlTV z$ZBXo15hB@j|vJWRM{jw90t}@43=gIz*572HYuC$IR;=3d!WQj|E>?a2~|(%qw7GQ zs!RYmHNVw|w4rVTRIZx6<=TrO@nak2fE+qhgibiZWbZ)`qXB3|Fg+S%ZJ?j9dYqeC z1$Ee=KoEZf`1}w^^tfWWY+KZ4X*2>6FaaK1%o0dDP;bzgx1Lv*U8;t6wzS;xE>n1N z0J8*GjXA^IM;_n!{levp?}>^*^`QRtFvTT`T4C*u!LA|DnlcHCt!Z%b!>+3vjx=I~ z>!f|>S`-^sneEtwsa=_XnT3OCt=69VTx zN&zDHT)vSpx;J*&bA>NF7bb!SHr5wO4mvs!rJ(m#fcqE#1P_mo7tWcVp7vT}Tm4q^ zJT&>S>Yro0YkRcM|3*8fqlw91U2h`m)=kKK86HI`gk^;{ZsqIwhZbgHf4|f@yDcYt zc|L!|VB^VJuTwKeoM!|ZH*)Nq$~01S9uU&1G<062017MBD7M|d)|GiF)J-i;n%ll? z9_>D1&msqE8eaGf_t;nP)&;StN8u9MV{vAJE-U>@H(vd@GE+(yJgydrJ$qIdR1eZ) zgbo1Fbt~(gRrq*-WcN)xrKF^!(aTMuCYBM3J8a`gX(lJ~AtHKnPz>JTeghPlZ!a|! zK`@_tV`FqUzTcx14rT40A7KO|7!rJdH)!|B|TtZ|2$BS@`1$t zd7$>9PX9bm@XEu)OTUi~4FO`$Kfo$IDou!i3fP+xbDRJ`<%F9AC*xZAj~f1ZobQ^o zmD)X^cDK*ukRR3`zl-L;g=V1^c?fW@p~jtbgx$R%?=ZW0cMK^=_HXTCg})1!#gC1 zC>lZ}>vuC6nD{`4L5%QXw=GYZO0-zxs=A!YuWu9+s?^x9l5`onIns5I%!DYCZ(Cww zFH5q9vTgh!%6+S;gX{hp3K^_(0wAGnTYOv-MFR?PoEHFcBS5~AlxB$EFeC#I#>*i$ zQat^J{pe+HvzC|-X59772KH>B*D^V!Deo>o_kKQMHzESm3e^KkzroBt8zd|V$$zY2 zhPz1cLxA>(4t;Vwkd*-NdqGhddh?vmk@Ycfn|=x>PUM08`#*quuf{>pXigF09Ehr; zqod1p8d0Ml{e}Sxd7~LHZXaCF*^SuH88l1C%xo+zf5STlg0Hr$Pk_Vs`%!mPkaT8BivkEuyklj(&wfxVCMu3kH!vS1zuR!BIOr24~jyWogPG`unVc8nTUi7YAyJo|2UN!DwPnnO- zTi;$|%OZfZEvtv70PUXWFVkP2-dL-c1v|N)>b(sF5`+5zU=0B1vFPH-!k!o_zjyLE z2?_G7_rC0K2MXfI)aRVDw%IXawo#>kJB^g%e;qh&R690@ZvGJK)0oLs_iof_(Hcdk z7#J8x%878UB(8)PW?9psCGGMO6U`#YkjX5$DZ$PjfrrPh4slt+v0@>{%fyI~hu@&S z-WUm|WXuTlb>HkZGSe#H<~IjPle3_)k^WREMQv$)u|gdLqBSYFR z58HEKQ>lNVLR^U!es6JiG>3F=*S63}eTe<$&!U&#dnxs2i~)odp7j`{OHMfgiu1_b zWp&{W;Xk86VU@%y&BzXuw9~1tc=`FoyGh4FJm3-()}IjqNu>YoGMS`BhF};hZPQ5M zn54SzUk}<`UTB8FG&W5sMD~7InUw?t-*3umCx)=pT` zR^THnR3X2>xr#mE*R&*sngQJ-z_hu=lq4l;d-Qg~UQj^`3`cS80`+ZYa*#F+YLNn! 
zx1%G{@StMHdt0G$e>&v`3|NMwM=JM6H#H<8JO6dWULwp5WKnPh2EnNa1VUqZrd~mP zmp<(2;mxB4xj_Ya)J0I|(GC-xJ!9o$*}|eCscnAg(%2?I!G|Ww_u;54*8x4TI;%`{^Aor*>=axpLe4SHp6#6RKz!)TMP(N4$5p#0;>=m zT5k0#ed8>MJBI;f@d32E;AxiID1B>G){Xb#lmPLPn+~gym)bejPK~#&5vqMVb(k@`j zI)a?>B-ArzxFMV8grIO)VPS_f7jozQ?cMZW!iqL`4gk;6#*Qn=1k?q2iWPabIIpF3 z;%}Ck!Cmkf)LOAwomDqMW49XJudplof38GwW$ty57I1kpDhX{eAf<>mJAZmyQzxw*SasZ>uv=`IwLB^-VCRdrW#oOdRpP;-h$A-+WY zk@ByDrsDUq*jas)n`(al-99eZubW_ZE8bJXjsw#G92lY+>~?Al$e^O^hn*7Lek|-A zNQ8#~-9tYwKsp9QAU8+*Gh_*cQh^8sQ(+D;Ola7@qL7<|`x&P zL5J=q!(cw!Cv?lH2apTL!(qU+*&qVDe{=I=LmVBS7KSph%Pkf+W8%--a=?;($Qf~iPQkpjJTf&m14)X5xZN~fe9r*3b)T7eM` z9kBP!I{X{)8uNkH!&CO!CJgOn>2=ycIjq*OZA0|>ZUVrUj@;W5R=lX+nk87L7bF3r z+7uKvKzCBJ&`B4bdepTTZCp`*(&-OXTlVRl|4Gm*2>9|bxavQg{9R3SI=<~pJ_2q& zYdkui^nU8{)Q7{X%qhB6^dvQwW)Sh;(%S#N*QEIU-s&lqdn0Zg?lpk>d)u=)dkiiL zTb7t1JB;UIl(WH6q5`pYU?2>E!OJf$?Q*h=auJu?$V(c&dKO}r+D;7!K7bxe`>|nO zRI#8;%x}5&$DK)j!$FJ#)*bEf%d0Cz8}=ZAU=Tt-;Qa)%SWn@#>=I5$rmQtLB0)z%aJ<&7#*o zhb~jAv8%6HJL_5l+m8w1u7b&d!_A^|-m@(?fDIo%$>T)#gy~%0h#k^9mE=lU9Vl8q zFR0$$Y0&v2)&1@GnA4hae=JUZR(-n6nzf?B*O++x(}*vg2%nbar~2*RcGVJX_w`** zk-t8}x52+LU>0y{app-`vh8Y$?`nk-VRYfZR9CEsYKu$X$oK}iZq`VeJ#KkIu2EY2 z^%(xi`uCM;$1xseeU-o3>$){$M?fF<*&l8dp(4ETzrYtUqVZ#2=b>z`<&R$|ToFFa zUBtV+!$qZYIL_PYc7L@^F4cj|itILl<&ht34;kZiE#f?*)qC)g&1-n+V|ZM@%PjZr zd0~O~<-OOB^La0=Q)-MzaU9T>8kSp8kAiZ2n6=9&B4HkW!-_JBAtc}L>z)CqZiDV} zAAa`VpG-C(tF&N9JQQ^?Y`@IIH;-3afuTr^~)k``M}`Mv?hl}uel z^u&8X)M~LplzEU;&`(uiM+& z@DBjt{}>PhURf)2=C+^kCsL8H4;?AD;}odG0Z0X~!-D{;fVAK1r!}VF{jlwq{MSK( zf8X`nWeR)-BB)xDMyQ&ZC4$C7HSTENU!Vl@l8Jl2`-sw&D~~B~xfn?o<#NDPAk`ew za%$<50=+~tkw2RL*RO%zxPOxu!p(-D9_pbFu_36E6Mw@MdMkV@RH8&aKKckEZrv(Q ze#2RYCFa0AoK0j~`=Fu&xZ+VrnX8K$c=Fa5y6H*&_l2qBm7jhlML7@96=94 zt2=j`b(!6I9iFUCxm7ST!0A~e*#U=zQ`|5p0r|tfu1eWq2j~_+1`ql2^)?jKx%rd~ ze-4Y9XU8nNpxiRcW#E9`P98V2JzO>}JyU(evL8Sld=O2CYENpA;&QhKs7)h&`t(Jb z`I5qSK-e+-*MnVE53z4>2`gw28vO}nVN8}lIan0zf<^sN36Hr{VHW{*Bj=VYBnOZU z=e`aAILwG0tPEg%jn1~*$GpATwRA9b&txlrQXWbSk`)RPx9v7h9(SE9UJsH!)-8Q! 
z7x{{@jzU_o+0GYR?r@Bj_G1f3^qSY)>3;Nhvl5?U_-u=;e8Q;oHX`&*`@`HzYO3+wY8j zBWGJlv}G=+flOdXtB!%(LTp)CpL97$#B12dp3m`bc<^AyAgBRslq7$0)hNP_N(RX; z-+o{7oRI*!fDMB=6r&siLQ;w(9RCXw0FaL(pt}#an0n{7=3EXh5^rpsAwJ-hw?w8x zJCPcc8afv~*&Y|YhHurf&%^9nQ}!eiu}yjAHi5&CUr>9O0fcR~4`!gbnS_xL-HSx| zBFx^$&R5_4#>gP3TtfcUbCa~rT<2y<^(Qau;9=It^O8qFUe>{rTk3ls8o0ZTal>c> zDnfQHu-(AExxZ;2_;VupK$`D)9v;XkW;!owu=64)fB+T`_{(`F$GHsMJ=C^OQ9Rpd zo!HGvBAujsY_(>?s37NuT~zd&T5!|-MU*o%(=nZM`TT%&@O5qe&c6*MK-2FuXvR2C~++tc|2t*r)3f04o|f0fTarwo=yae&mRN zz>Kp$FZ|0Kw*SP-{MU(Z|Fbt?&VrBuNty{n0zq54KZ7<3O;YyXzk{4IDLU9+)SDB- z&GqEAaGLxq$dt`B0Nn*if`2pNL{vO>!>(=lTS@?E$;2vy+JNmWL15Z;gJ{lX24L4G zZ0uRZmyyC*{GjVrLvVO_xRO=Du8ViK*7`h>bNA~s@sHSnceG9ktorJpCFGZ_*obPj z3RQhHTg~g3ViAYlUjqCPd~dkBn9l?X1YWklV!%Dka8KZPEa@=Jacfsp(h#ZxBn$uY zt30tG{^b$t{oYN7RR4P=@sY9&rkcT3;6lYRUHJi;J-`SD@shh#2pP>T=yFP5ZRb}Q zSguVQy}AK1xFCK7V51)v2BZ)NAyNn^sdF=ya%%G{uAWEweE;fTm3g|mu-wd$0Z{84 zK6P+&y8v8Coh}BwabXGL!fFYzLTul123ejvx#+kI*dnB%@a6rBn_nGxIeWEP(>GXW zpw~Wj#ACyZUlO=}o(nNcOVNhY9HhmKk^7~q<7Jl1#%l3`a&A*(A;eo(A9C%Q;!Op^2C zjvI4}au+zI7M-K(0PCaGD;{7x;Tk<_1kp^D?^wlV=-S3ouEmywQRy zvZc(A+ttYRNvSy|nMjPswUd?-t&PIho2f(4Jp2Ckd)A5d5AcnSa=Cijf=0*bvm56t zy4PvkQ3N~TjnO(Tl~>MHR-IpiLThinJPJ}HVp?X7Z0H(rO@b!CKj#j>=WA1g{FpB( zqaVeXIH!GnELD5SjO#b=>2#m^tlX&rjBw?e5|uxu>e_BZ17P3YUQZCuv;6fq5W?Gx z(QSn}A97eioW5K81Vm6U|K*#i4#9IwsP6Y}1yG)u=<4Wj@t1zzNooOo=b(@t0OcSj zY7csHaNgBUIiM);F&^y;nwKHlL)aE1em5Nz zY!=>oHNx1Z7|N4z7pn%b;|8&-1gNL#$PE&Jh}6>4(_^eKP!I+Ks-}U0?VINJ1@v-x zIgrW*Rb5b~G!0@cK9#y0^s183q*h46-~D=tvI;thh&GpfZM_OwQW0HC-`x_`v@4yt ziV?DS78Vg9r75H^Fb&GYug>c|IOCxxV}l-6Tw@tJDO+y;FB%+9qRkOfheva+(-&t^e%w4R`GVN;?!2waX!Greb+ z%fI5>;;Jz}=*wGE5>pr|W`lDB4N5Q|BZy*S1F7$y$8{DY;tR`9R0kZHg)Ex7spbd3 z+gf$LecQ<|HJ;yR<~b?FmwCBh3Djx`P2@1@e`=j1Z>-LX_SN~U;;`}>5$!G_k@%ps z&qnIS-`~?}`nRb3T0tB3hr?3SrDhK!U#00|Jp9h z0VeKHn4p>!bh4&EJr*tTvaUV#pqY>`FH1Latk3{7V~syyXmiURAa%CqK`lsMvSWUw zMntT@t%v`Z3XPVF*sn^vFg_evx1Go@E821*&dz|Yl2BYJ#_Eh5U?UrDQDjq1?P-bU zV>;j0Nvvb@o(p!Ls9~Gyy-t3a*H%}U_h}6P|MNwQ)(&!oCTY{-@f%C=u~ue&zczm` z;GCkP<9y-1s|)8dpK+^|D5#M)GVpR^C|1QUZgD8~=I4d367QaXOV1|fMGS(hys(fR zw6*7eXtW6)YxtfW5-9xs*dJ%edsInoxxn@2r*TJ6%yFT)7UFr>_CgN9p}YfWKd&1g zBjM)>2I>hy>_segU;NZ4crj=5Sc8ii)9Azm%hDqDM>&a|J2`rHZJ(z~8pM@~eO#Z1 z=w%l6E5f$Na~(tps^}Umvfrt4B(R>ku;um~fgdX>zBzySu=mTZPMO+tS(=|}Rx8`N zCG^_rt<+a#pjaue(EV|`kr7afTjc>^l|nWzNK%E(*F2r@`MTKE-T4Jm~d|=yo zqQ2i>5e+5dZwrk7S0P(}*nd=nlP&G-!;pmp!lN-;ne>|&oYNy}0sy%nbAM?b=i9eR z1A-5HEaD1|3ceEI?I6kOkqZt$%QRBOE@`5588`>&=Ukpl827l zTknN3jW-3g`X<_iglm7=3JK_38_NOK>y~`4H%Z$uN<{G2oCIks^Y?B?v zek1a0lD%ak65Dw%|336}b_n3}!^`71^{9^4*0WGcv0|MiP?Bneo@ruJ0*7iVY2`4S z_VcvQVo3+tr^u%3%*JrVD&rDptHfNq&}|(`TAQ$v3x(28>TqQXQF|kSC{`jQK}OqF zZVMG*r?$dtTl@e--m(6pPOctMtAIS^3Q6OLyvDp=nGRjos0r;EI?>eA5D{kRr8dBU z$$f-NP2_=URfrpl_+8uXe5Lg<+bT)idNYXrPn}`qVU36qTu~212jo{$UMZ5Uj>;P<-px*H$tqHop-k$4ZT= zArQ^xjrW=j*y{=f7g{qiGAxKmN|sTw#-;aibUO^ycXbt&&+~TST{7Iu0k2FkIKqvC z@f)B#qfxYz9jzt$9lN4joR`-MAGEVyoa!SjfxM9h3J8UIXAwL{g3R$IOJAR`MGbBR zOHv>JaXS%c9~xNfAq|m&W}ls99@H=p^8=uYxJENN^L1*Xp`t>%EL2PDUhciJa7(@w zClWPQ zwt76SzJ}LNvGz=D){;Rf`hu7vdk29XdFRv$4p38asv@ia4AfN%NPR4Qmg{`cVPTQv z?qr{}abva)q!9?ZTb8fSWko3KgZjg=?%LPMo4cC{UUwGQbQIJOFe-JRBy>>JHvguiQ&k!xI30(>GkJe>9n$M zicF+JVW8vhXU6+2uI>I~h)e)o87;$$&W(PQhLYCln8i#O+RVt*$}+XEgVMhre*zv3 zxtusuA3*klh5y!eJxIoTB=ivqs&s7kV2T{tqM}Dtz-9*)>~l5MVsq9k#-+v zU%|5?>{CM*)CkM9M=R7Bxca)zc)ncjU>mF;L@*f(6wFGHG-M`mp)pQ`m})am zRM%OS`BrH{b=i~;sykQFBj$a2ic`s3!y)}-uY}U?alZQNvs_E_9{7c8bz>_}BBTp# z0}H(Nzs^@VY6_|rDdN_{>*LZm#!lmBSZU6g`NjP9dXZ*42Zs(}P|Cl9#lSA;ZLH3i zF6X+voM&9nO~<~$+&*K;Wdy=LVWE=M9eYdL(ZSG&i>2T8#mtXn 
zIe%4+;gwkA53grJ`h6M^`EN5HsW`S_EutI;%oM$6XW|NPXWC4kJa6EMm*iL=6lmxS zTml&?yKVcOR}Jl(qOWyfqy|V<0skShj|IpxjS(FYv)AfF zLiAATF`QGp8kS0}Q-|s!6LIW}NF5ozo+CJGaqmj8s9CG9;YcmQf*+hW_QpdBKc&=2 zDXnc?sQ8+iyNZW@kWHyan*H2FT!B!Xa77YnH+i;+jkKrErTCKUrh*HKl5`92BM3Lu@U2FLfyk%f z%^`TY9xOq-ZSs3yOoJ;5qg+-vl!3lcq&!pR~ya$QGTG}DoQ2leA#%=?A-TT99B`mXv~G$L2DU* z$@*C?Ev2(k!smbN8;U}62Ug&8&<6}Xwaf4hCEmK1tEH7td!dChsnlktMzJds-pN$V z88Q+t80$%k7n~@5K@MXWP&JynDsAoCD&c%KPKJdzRB@=;g@CPcT%2{rMT}jQu2Kuo zPBhZPQ`|&9!kY^bSs9@(>7^;{aR+p#r08_*t10zU39X+1YXg1BA$78J51q-r)O_a= z-%or6{R08BKLnNC80RnHlZP#whGyt`EP_S`8eBD#^2Hf0U2gCT1DX~5$6#~C^wm{t z{!K-Euv)E!N~bFd-|FL$(Me<;ODb#4EjODT$2x^HxWgOCf2}*h!`F$nZ^yBMDOjwB zbBT1^O=Tlq7u>R4cG8r61fJPzcV3N>o0w9Au@`59BUr!zS4CNWkB@#si|E-}u_Z|v z>8VdXE9zD1Uo9wDRLbzc;X{JWf7|C-n<_-GhV1>l8uFo7A|XzuIOsHdw$* zPi_4DY5PG!9^xokU3%tGfk9O--uz=ob;XWEz8x^wY*6O4{Tj*$@M5?nH?MqmPtOsH zE`K`aw~v{q6JOf7Z15g;00Vqw3~f0Ly)$Rdm@pu0*GN~Tpa7TP}eKsVHoVS03@@2Z`i90q-<$3dBrh zZ9u_jVo#5h&6>(St-fT25WEcUxwTe}v=!1o$Gs%TNc-mpJcvLKEO!(Q0X~<|O#PxL z<4du=01?&Aza5P61xb#Cg~^`K4mSj%uNj29>j7nuxmxG?P^fx?7w`G#sHh>xJ8yIZ z&>g7kKq3&h7n^V4`};J3x6sS2-qlDLbpdfsOAK&01yFc>kE!p~Fj%Ol(q;LZdSg%P zA)i@^QhH|zivQ(JfQ+NCNVQElnpm^7Jf;rrt3Mo;Ci0qxK=c*Lg}z+=~@eZIBns=u`?++sPD^BJ4}=z2JMY;AFX9{`&-h#u)`891Ccp%=7+ zbNuoBYjr7vfjSa+iz3|%TU+OgjgGW>(`_oE?Jaixo_9@A*o56tdcxqngb48756i*6FBeqE^y$61mEC`DFpxBm2)Iy| z7{OM)@3s9X+WtgO^T|>TZcu|y8^ISb>|p%aUiW}v+t10Q(#>6YjV-j&=4|=a_t&Xd>=PS%)M8x;`z2pRt^ECGU($2qX1i*uq#@}gu04R zsE^e=*xY1!J-3DK&VZhhoNis$U>jLWW6MuQs4^L-`wTH!M23^DX<5qQ1Z6ZuTe-pqByZ}v3sz{-Z)yM?OmNs?#=2TogTL3Ou`w{ zyBc~Ap=8G!VdQgi`WXMp)I`HRK*tG#ec<0Z$vK}`;Jg{;wqc4oQ^q`6dgkqn_fTDZ z=i84IILGh?+9T^fK9CKNk@3tv4hXvNG$AAOXtWcSG?g|JuZjKIc=K`J9_(C>=!_7=ZzK-VXh zPxy8px^jtSez{bwBkQ^SV27T&W1k*M4}U#bsz<+^!%|Hveh4EzE@ah@3pKXBs~>9K z;WqY-P>29*C%X3(P+odzaIbQLu3b>+>uHoyxIyNMkki*Mo-caxYYaI(!BSh@*y6aYMqlQ)oyl3O858MYPRMT~uL+_`W!LYSmf6fRJjhB@M$A`h zr*+QGFDI<^tTsqS!vkf z^gs;PzOk8kbhyVVn6qCZ{Bf3dl0`&*ukIO+;R53N1GnW@zzd2d-bp?6Vmu7ZAso%_ zD&p?$l6|{Wk98x^x1Uwl^LbNVY;w6FYsGol>GV1aPoi_Q;q0YAyA(Ca(pFlZB#WZ_ z_30n$jW$hQm%l{&VZXkKSUKEUip65kvo|O9DeJ(;VQ7;YeIxvxfHb$l{_IJ|ED z<$XadY>K3Y3nS`C%@#JpB~H+ za~fN2<0?yuTttj4GhqxgIKvmkl*g@m%mE`t8ZPRj@8rHT@8X_3_!(+mEXRwdagtf$ zk7xR`Y3j22j~O6t$z*|%Uw^ly+N&>h$`Xm(`v}&<;jGhhr)d=R(8`niGP|*!9`Bqa z-woJ=qy2hv44(*ds)nN{k9wgnKIRFHO;$XE1RZ5cWdf>$NE~Gol)yv(E5H=yPegp) zC&SX~E@NfbXIF1+-tFakx2$WtkC@|mcaGF&ON>A^x$~!8O0j zYReijh`kL*yQnK0HacTO>@(kr4#)dSc_yKf)#BNm7{otzySQi+CA135NkPZN#`Lmz zLB6X#$#cT4#W5F7Zo4b$f+9D#T~*O8vSIc{rQuiZsruP?@?Jruon>!a`QBIATh?Te zZf%~NZLg`i>OxKEDpOY%9k(A}{>xl!s1Ib1sxk;C_r}7$#y7_y24*HgHhgvv(v_bg{ITLZusum?^P6h$Ix6y0Ar)e*+w5hSb?2(_Z{J-Zi zLrN^safF?_c*4j1D~QB5JpN_x{Mtys)jZ_pTLRx`g6qC3Zmu@Ki>E`8?ZIV{KD zAB>3Qj!5q~b^{fVD728Ic~R|cSa%Aud6ul&4ZYe9To^Lm-eW*JE&jOiNL2X$V(&ep zn#}sQQD=PA89Np%G!;=P0qN4Ml!PKpAT&V&geqMEff>6Zpp-zUDg+Et1OiBapwfg; zB$P-C3IqrdkP?uZci%X+dH(NN?}xM2S!bQY2R}%-bC-SZ{oB{?Dg|^E0Up^XFuQyk z%5a3lC6zJ181rP`Yndt=Z2M$g31i|u>2vpr3jF<&CCZ<)>!xU zsdr%M#H1Xthc;*tGKwW}F{Qd;c}@+-v9{I%wDuRh4+`4RW-ZTSnTx6uTgo=k`_d*w z)IDiAMa>jgTz6m`0*%$8xjG=g%P4UY_=FX$ZYLZ>TRIbtBfW$jQ&T0mY`|p}n@!tS^am%(3)wKOJiF z6jm%Rc5X*7RoUM?-m|#$^FnSl>GizfkwS?jl&%u3;M~|i8^TdR=hL$Q%mk=>TYBT= zMpyOmurNUzSWaG%&P-KUx{=lwJ*%UMk%4jJv7J4GZJwU7jC6PNPYj77I$JtPV$H@w zuP5CddqaYZzB+5yC6Gf)_Pt+PHvUCj)#xxvmRASFyu9{vjKTYfr7+kQ>J!ZyWh;R7 zS=MoVTq$CZ?coyJff#9Uz#33iCWHO`*XoptP-i|+&$O>RsLUK%nd%X3k{zV7o@r6N zlUpv;mZ(`K^b4uCoJyd1KekrwanTk*>K(ujzc0;o z@tvW$xn9ha^VO1vYE#CD0*tzq5{X1lPQlm-1tgQ8qs32}4=fz$Z$=e5eB7OM60{Wl@gDx*fpb%FG(tzmN-4$4MME z32Xz$i%5S@kC@uMHxyJ)TIRM7Tx+nhsOT{+IAULGg#12@0hG4JaYlHDqY$4zCZkLkR?)Ug 
zSy$7f4ZlsLgF&Xe)ST&1BDN*Ot`EmQkbzhkZn8m3^n=5L3_ZDWoEn8K9rKFS5lo!iiY`wG}+vqC1aO+#YfuGTtkjFNT{S1wEn@M5j2OXq^6Jkzko60cqgrpNlTV z3|?xHhf&kLFG#RRBZSQQ`n%(dV=C{)U;5>i^Y|PkFi(OR8#Jb@q2H8B zz@V;nJ>-qr1w3nIMHG{Oj8Iuw*#U_5rU_Em`tmiFmyxwKtSn#>2Yh>#5F4ui@j<1O ziL)F6-1z1_DvN(-5eOG@fj3vRx|I2g%hjBM@QFGo`(xqbj?htEo$!esUaJb_JDKUV5X8}dY6zb8f;&8}aAadk+M!9e6 zQz#uI!>SDOD*(}z%}Jly=ya$1Wz*Bs)j^$u30cAI+n$(_Nzcfr z&9207#$Ii)2WP5!0>&eKVnwK(hX+DT&MP1guz`boe9Y0-wBb_RW%4*UP1d!E(ve2> z(H5)6LiZuknK5!~lN>e#uyQhBus$pXAW6xh_0{&$r5~yEPo-p6Ec>rQyAozKsYQdLbA zY%xHlrVN^PbOHD%qXn#AVwe02Q!x!3K+hY{6~F`Z>7ComBzJXk_l2y~(1o~AX;flB zby`@2wiU=x#d8g~vj!O-`0jySih;(8iDOF&4AADH0oVU6K0!A$w6ZZ(qn{iOf=Xvn zmW2haea{bZ=(bXjS2h7?m+=WH;4k$O74Qp9O-;=)A4l(0`}Tm6YXB6@JU3|Hu#cN> zFaALwgjwplh-ThO;|41C4m6n>oWV^CSVA5w2Jt8b^1dxM1nC(J? znXV|2=^&=5?C+sOWD;3ZTy1mU&O#n+o^_sskd<2=Eh)$DUEdw6ac%x2u$SbeBmS-AuMiiDZ3r+>7 z^IIToW!Ca}l2Tl9MnykjT&`ugUu(UxL1v~P5i@BHy2N5C1}1ZN=Ar%lag#Atf%@d2b9$WBlUNGx(? z(iiThDCA~Fve82r<yBUpyL-c zSTDquhs8pU6_A973JD_nnpQ>y@{E%=PQYXf{qzthpmZ75RGA0N&KGA*CMh|Jva|(q zk`TY_huDSAvUQcbaiIBFm**D?Ts{_gAr29qx9L*v&`2)^GeaK*BqmDHH|P^tkg1W zxr=nYz^qILTTZPO`4RwVKOY=SPZ0d@DWiCRX^=XAC{~tIl zs^Lq5J{%6`f9^28YBDp>>=yOjuq8NBfRn-;Q&F8!PH!#?KYF~L)UaSwl6y`W+oRWQ z(}85oHX@w+=0P#Sv`}2nE0nJ|?xYA}>NtmBaCaF85pSepa4s!*aa6ego4}lQN$L~~9;JaKL~&kO=rg%+6<5D8*NcuJdQ>C)>gj0TPZLps zBOb$Q$LN9n)X7_miRb<0$m@!6eoo_d}Loa(FWpgBxpAtD5Xg4yPG~dzLYq zb?STOg1Uk&%6bZN>lzV=7Jv6z+vliAlr?xYb${K_{a?JS@1#8$s;sdw@04vHakwlY z>s7Fr`T;j^08MONYhkUu(-&oc~!y^s7=gzGI zG?-}7E6|Bx1nz-a!Y6ru%qTYxdMcr#-#~ql+RZ9xk}+WfHV zOBiA^MElt<^?d@8IRrUu0MSDf@UGY|E-o%j#l^)PCpFbg5i3hDSAQosRMt5hNI#2@7L@2C>`jZcDxT0tUjSUUYtG+F743f?~^Z8 z;?W6lQ%mBF{eFH!A~m2#a{iCCh9)#~;y26=AHDY`%J_ADd?1YiF2F3kRC(-~4!R z{Kvt~Q-=o9p(8)e%L^>)qhLN?x9-UA-=D+(=A%(m+H_Xd-h!ur&p#<$JHOf+3U5yK zv5{-;DjXRa3QdWEbUzxHVY5yS&&&}B4?F*#?-!}0?`??84g9F5B!9Kr%{Ht-V{W#7 zOC`oqFxWo2_bR>3o4!LqyK2mgY1YF%BowkNG)=?xb&ekzz5t}v42-4N%xqlt8n(QKBvW3Y zRL-~xvk_l79oObGlxlmejjVetB-1X9f91=~E=3rRE!L;9H@H8cdihSG0s=^v^h%%6 zYl0W(LD8M_I2@MS>Y1Ckuq#q!b8x9F*ImoL&#ZWb;a0*e{4LNo92+z0 z>0ja0IleHe;lPLr&6m7!7)(+BJlu_}slhsCVT*b+aN@1g;7d7kUIHTugH8`2(DBUK z?pe(fcl@LFHfm&ujJY|bg1eqAlRNSDZ6Qb(F`^|;z+i3RH-qORh483Z&C?Td)|cpp zr1!ItydBq#!F_lZ~`277^90qaK4A z1}h?ID)(JKKKs<$mK;BJ&`O*f zU(wjcn~ouq11ugqhN_*NJm5W&KJl&mr+P)@(2P2~Jx5X`eXO-FyRf6Gn^wzQNU6p$#VQLgP6nqz3638Nn3 zeG-HF_Zh`!{ioN+X>%soX<&wWygsSMprLyk1Q+@^L`m9GF4|mcDO|1t{5LW63YVPn zf?POUDp9xy;2H%_bjB#Mu2++x?#JpAKjlFxp8kHIFT(XsyT~|QFz|_8sf4YOh#7kp zzXesQ9@#@8Pz!DR!5ytL+&@h#2)I(@#N``;fr}R=Fi(NiV|I<@0o`5cO%WH7HY@Mv zhFC82Ed>|p7QK>c_gMO93mHU$e-KXZJ)mQbEd%iDK#~H05~Q~Z!vG}d7cBIgPJ&l) zP)32CH{?7I@)wyY8AhG~a70{r^JSe&$IKXpQsL|#2X<|Tk*$0ef9n0r`C~9P@AYl4 zDf&XU717frwv)51z*)2F!?d_P?Lfax*PfR0#3c8c5l&EvpM73-xd?>;I@TDL3mV&0 zn7aEp0cNN$hX7#*qG6LkD{QQHciFZ?G@J@-_3sS#F~ltCE#aPY>&_xw-ilsDlG;f8 z_U7lf;eC9BBj~`owJtC=CtmK_u&GABEUDsUzLqLmZ5Lls%B*r#4&!|yXbhs(C36`-E6PgLSJLcuvyHCz`Q)Kc#ML2c^HR1y# zj){97Oi=W7tGkF)mH}g55?QU}^ijqghE8ZA=Q6CMOwlYA zK~Z`d>3m}`cp0iKkqn@bfm|K_$bXiTmGu<5z28rcVRYPY29BaN-ZCm7UqD`xitDyK zj(6817hag-=XhjlDUW610||;Rsr33ce8=jOI`rlqfga$a3_S8E%TFQEVVwMh?fxPr zWf1Tu)UIfODM^I!`DyEc@%S{_bmMDB;sXt-rPswn#1mf)8q%vyDd9 z46Egj#km9o+cvrGunF?v)aKBNDTZT{FO3=o+71SkQimrJaA{Jo5qOnw2abIir(GjV zarGB;!*$`9$GDdh-SlK>)4HL$2lM6|0|2iQ0x+Pt?JozY+jU3`j9=b@7+m~0EoC)+ zp7oE`hV<`8&p}hFA$aNY9}-B_{D5<1zVpBSaBWh(A_!=45`TcT6ttR7%dE|qd;hhh zoJIfn5L9De#@zrWmaXuAT8IDsuAfjH{^KOkX}}1Ndc~p zM{$OLtd&%HV^pXVB40^!i+enxft>1;PhBnK4_a&!JaxedF0*Mn|3idi>3Il(3>@x=m!L*xe2-Rt54jqb=GcZsns-!=f70VNW1O(qqGAF0I78$!W-wWaV8T z@-i^M1P;auwZ=Em7Fh%$?&_k}Df;LXZ7@UR%1+3>Q>M-W(QcK-ed2N;N8|;rnz)~M 
zdHd$IdT2AKUH8YV?ijm!R~xP5QDBOMWMndcgLnQgQG{T*hkPqZzmlC>_F%%B(TZaX zv?(Bc&qY~S)<3P3u5L{yvg_o66pz*t>SBdy9j{qp4`@o)XpGF5_lhyDIRrU zHf__uDA}XI+emA7cP=22Y=?Xnjn`pXTf;DV2!VxXbl-McH>rcagSg7*f~5Ag+M=Da zwr$e^l}pJ7UXKY0uy`M0%KdI;3iUO|h#F@z(}TKSjwj$iwi`&QXR7iIeEwMFSnWbK zrW6{=IBoeKUrB4E7{j>2bbb*RvP_R^$L?n#!_q|hwgr+Nwq2iEU7A#+=eJ|u4Sa?e zt+C=}2^y5H^R}U2WILJ=Bh1_BZK;b^nM?DY<<30MGDBP9rO&Bz7h1q9MT?^xof&C~ z8}BKJWSLC*E>A^<$`J~Aizh0H5bsuM@1l_R;oiUMZb1vVV zTeod|jHucI*3T;CE}nAs%2pg}y>2{t$>xurM#XICMXkp03K}NpUQEmv_lNO*fm@UB zSQ0SrD?RU-cz%j)D`sx7f>R|v@XCRl9>6%+BL)0ghATI%fJJVn&hfMykHb} zf#&~SU88pn-V-1xaOur{p?e}YcAvSsbq2^s2`b)O4&|e8s=^#VMhY!qqTlJUExF$r zx0nDg^?q8&H5PLKc_}FNYxO1NJ6C9n+TkWP8eM^vC~wsA{ACVb#T>JOKp1&X@Ox~a zY-=gnVJq%qBOJD2B)@`Id^R%NiR;v^T~!-DyWJcol(y0*jkZy?JeEk`FY(!pdceS! z+L>`drUzTOJ1?doVI#UHZlM3N#9RQv4O`KeBJ)y1q@)C%=dM)8u+E`fG=B7+TTgF^ zy`a}4b4pVheYNumNy46#?Hh&=2>kP6cC~$eHNaf98GgfDpv|TF!7Gu@(uT>DJzI#N z7-b$YE;oVCoLQ0yso3C1K~P_ZCT?BD`sc)qX(2`CLPG74{SzJM(uO@;a2}J>>-!1< zb&A?^BX#Egbn4D`r(N`VG=nK5EMQxvYXM|Q(UWiXhdWH`b;v9obgBT1EO6bvkFrB@ zj4?~e=#{B?l5f_GGRlZs`!0Wf(fS}1p{xa5>{wP;v%hf+ZhbN>IJzvjNN&;}-cV&p zSj9MzIq{AYjN^A0J+T3ev0crp>0+=Y+q^g#C;WXOw^$D~{q@*_`L40**#aTA=nyLT z20-cfCq)MyNZzooS?d3L^FMgf+CEq|-_xm3F4sBcLWUb&tV6?Du7%Ic4L2dXQZT+r z8QExq<5WtU-)*m)*mJHKO#p#1>6{1|yp<`mi2>Y&bHPhDgg+kZUzxFE!smeq>dA}; zAY4r`OFXP#9McnCS4KUDB3^E*)*?Iu}wv-r({g@fKzQ`qThj36~0|PwzCDHz905$ zA`ez%#*Tb3nf|g`cFSrYK$Z^^5tc)Gpf(V3)cR=RlIqMx39;$7XY&PD zG;0ByxH!v54_pa#j_p4GC}HB1A@ceSUWEyCzKcno=bx9)h4%OLWA6t6FzV}q4!W#q z(oAhc00|!&W7F?wk&b#Bogz~W(^;mH1zn?W?Xx~Af+#t5|1BWE#YRCCQ_DXCUs#A0 zNd!ta5nqb(A54yKZWkHWCJ-Vn`;5>j!{WgVPyGjfiCa!xnpRCr@g{8nLsBR}SUsbq`X5Qb4FoC_Z?#LH>4OP_5l+9ZN1YU~trEs+>al=24qeu|@U zLE`UAA9HAPFJwdaFat4daX7$C_O@veE!l)Yv8!UI1C-t2;c}J32W}-!_XDIj_!^bZ z)Zh^_#_R5p#h~#Dx$9Y(ch>(@vvOMF0iz>%m&dby&W>r2&xCI(RerR z5_q|-K4#lG9#CRkdup68c{T6#Da%U}`tsAcpW)P_OF@L0B2E0b;0W7z&%hOFcQgwd zI^jx(CytnWcE3lHv7FSvqFH_YXQ&QyZPE+XYJyY!b*0Va?c63X#5XSsOIiCR@Twsl z$EysxuIu^0t zN>wgOroK;{$=(-8Z3*cjo7e^f(--*a!Q{*^bgR-tDhr?Yvtbr)N@AfmXJDW2rdlGX zdneF!U{eylVo6~1tyc9!jcoHp^z_}x9AMvVyqzqZtS@j89eCuJc$3J6W7OV^3UqF% zbID8>e57a??plI6fbE~RIlxVpjBXIAjG55A-^8Q8V}4b?-EBIyYN~&Q*j(PlE{h&f z@~?dPw)ZsAO*6->czouh)(Kcen#P@QM}Nw-cY(!R+-`6aHah=JBt-DWBA4Gc=mIJ$ ztheY@dOFxr+W+-WBN;14re{hd=4&5REN&PJGTR}JYt%b^47Uvz2fpM0pt4O=Z8K8y zDrgLOd?rB=aGCC7@UHsN$rAEXz6+h~xc9=%+OI(1V#tL;QiZ~`5;q?whe^=}rHTv! 
zDn04Kr2Nr7SFdiIY^FnHiq-Ti$R3)BKALF)Cs;~$-9N2{QmOevZ*p~O_*;_FwQxPp z1$x&|#4Fv*%FFy0&`NIUsd_kwwI9`El{IBN&6vWq97EzO@$bE9_4 z+L8;{JDq--VI3r_eX7O)>*M|qEkk4kUGWaQB_^{dQmcj4y2{rvROf+S>2J>Oc=V_f zbsZ=^9$&t?g$g;S8-qiwtKI(XSeMjJU9+YVBfm`C9gn3I{GltqcoIQJ&qg-uDk8ll z7PZXBM(aW~(6$OYSL%Ge+Q9QAB;A7`q|XJxRAAB_Px*5@i)}x0$r|k`*(cnYB*;8rQ|*IhvR5-vPu){ zV{S3(WD)qlN&fE*6ucAZB=W`OzpXGV!rcu7hF|j8-RsufKk(xN$CocKYxdbn>^`RZ zB4zl&{+J1E(-rLOYDg|Hl3!@r0BA0PU_^l*(9}o0x<-Y=ZoAd+*gtu;!7(sUi3zZY zJz(U4wjUvW^@`d?aDa_J4sd#u^IIRS?$YMNykzAu+KPZ< zHFSjaKaQ|&*72A}mk)^OMs7J=eR8y-6MW!w`;T_qZ|@k4loJ?VQbNwwvs-UqEY{Bl zDDj3VRel85-g=Yf{XlR$Cc=-3`k2$koSxn#DtgZ{q*S-k?}{#+b8>$!NfJ8&(@C%9J2-GkA zb@%RmZ&BEKFyW@HHOKbsj=|x}oH!B&e_s+aWzU)8*pDJGo(q@6iYh+c3mEe$-;UrtTtTu?Wm(>NX#Bi3wm~6Y*JA&k&rnZLHI>0@_$Ia7Z!rKm(9PivhA^ECyi{_~+XdMeI zPJ9temgtv}bJ+{tn$1SYji)8yP|6h*WCA3{=SB(v?pIfdXDWo_NcepIa7Gjc(?e>| zHFHR+fReVb04PP~zzn8_0R=5VyKQJ5|?S(UnbI z#4a%7cC+W^<`U}G=J$^`Xx@7$^VcPG$$z%Um4^2DvwXKy%-ho#ClT=7Gl+B&B%$}H z3Qbtp=0~H{9Z&0oZZhR5=7@T9fBT+Z>v5vBTfKH_GAM?Jo>8N}22cJM*2*GSH8{RgDQ`h}LCP7(nhlq>jVafs> zcrbsiyj#&JF<1aHUHwbe2%$-2~K3*I+vP)IMgw zr}uV@$k4!!C3Rj&s}q3dl-l&RK6Q0Avy*9E^uC=wIx&F*!dCnnW58WmOP&YOWeAMa z#8mqVCv||^Zft-LJ#H!#G*UV?ncy)9p*aZ&2iY=%YwfrDf375EcWK}Zi zR7C28O?p7A54$*;%)0B!YUTO)(uPsVr@YRNvDwj`rnXp)sj&-sstE8g&r4_A8iZI| z=SAllWm}Hs1c1bfdeDDuDvlITIR=H7?Pd-G{I#igbvmvx-rk_zA01e=P4O+jQ7;d( z6cjvqPZ($D>NHk@2plGe8;b!eSv3iaZ)*@$)2K{tLAH};NOx61h9Z6*L`tE}fBPh5 z3n@uwR@Ik2$@(0m1X!4~|fLuv3@rxng3Oqzv);3|fFgXK|oCfG~k$7A#iND^2Ny@sFZj)yMQE3mrvTB}B z^Z+x)IFeiOS|b=tz~AU58&6-%yyI#|sENhJt+w5%xOF+><%Psm6R>#y-d1K^SzKTs zV1QOipa=NT>-TV{bUEt1Y*S2_?fk5l0E#Fq(}!w{#UUj?e%!S$t)UtL;adVhHJTPZ6eQTJ~&Un_Wne6{}0;5(p1 zt1Jvkcqs^?Y^__{s``xQd_*9i4O%h2tio7gjIf;Nf`k7A)Y(L>9eIa``T(OkuC?wPA!76)_Ko~boy+r zt1-RR$nXGFUJ%>|hI^UfyZq$h6mUXYtX6goq+Fl$11{jhCO$QDlfAvYZkkud-nN0* zl6dsQGsGmx3TSZFt*iOZ_u6XfUyl(8(=c(^i#-)8ML|^pF#f*ZX%{9BO;qeI8PG_! zpo+~OQw)d8`xM;m(8;A20#x70a~$5%;$4^Y0x&PR=H$9Ft!&e=9I8KUY(}FP8J;{1O8UiHiOv=H6;JxVdM(7t*x>`guuSOVM|g! z65wD1Ki)0gb;!$O#6jftg)PF>k2_?rV3;?19{3H3{ zduokq2bC`C-W~%l+WwdyH2R+iu>TMIH_w!FnFjU&vSNI zZb}pV}QCAU-# zh$<`@P*1lB6zP_~HP+vb3*&7H5~PG=L=Z{KkF6&nE9XeH<8uoZ?*Kbq8bqY9$5Xg- zg7++N_2(!38`9mLIwca}RQV!Wd7OKh*?Z z;M3{iST65;O6<*;8#8n@32snl7U+e(v!?{Xkdbu9K3& zkp47t0zo}fc#=jNc}rlv%NOuX=;b!f4P8lE51Ll8b6206)Xf)RuH+>twTik&wrkhy z>6@1+od$q?`E$=N%ImSTg>DxM6Vt=d$@9p$qAsIKue{mP3y;`9BUWkWv^=$BNqACg z;RL_cu3D;xNoEH1n~TcRRfJi=Gg17bvF>_d4s-N|UDRgmwgvwyX)QUIxG_1!#Q zIJ4uL!#VfnF9J~w`dW@kesDiE4CF#4F%fH8`8nSZ`yi1p2Orz%Wu9?%OwQDmQvtQH zQ&;3(CKu$H!kcK0UBf{+vQGB2hu2^u^VPB%Xp=!Q?Z9Y=wF?` zQZT)JWL{*VUCX|no3h^mKov30zY#YOaB*OhFWa_EAn4Oa#BPDL6eNY=l~4t1oKCtx??UyK;{o8shJN(y-JB%z>Z)hP-?z%$@0uJyM}xIh6Vub%g5Q%@8; zh0*k!&|%xW%@;nSH#M&7_AtK0%@t}1vf^FCQIs!oA=WKz2(rWrSUV*>M00(K|$NC=@Z zq+D+#lZt0swxfOLN?nm>00$Qd$n#aw(ouX`>-mnQl{@#4mx4hC_5C zythEh>haiUV-%C}ngyhf$8aUwL_&dC_r%o_vm+>6TajPSVn4;=Sbi#iZ#Ok{vOd;F zpwpQjD`@1G_RbYi2hl2yw!CSeBD;Ao)Qc7Ooy8nMP3vKL2_!qI-Rd6vgE}6lX;35? 
zd#feLKw8KW&BEPKK3hnf8hiaH;fm6Dpz4y9jH`K9>2XTs4f@h-Tr}o>h5kHm@t~|b zdV$T0?zU5ktz_>dw#^5b26H!GOD9x5IM{B4r1VP^4pI%Q$${@#J)ko+NbN=-dhWim zt8pL)dTx(RT}9QQJQHPUCIbA2(bkd^qj2iRg(l)_mJWdsT9Gc(X-1x~L2Hr|0u|*E z0rn=@GreV>?nV@lmiu#>=Trz5QX?O9Q+s5550*vz(dVLhL50^Dtu$(55!m#OJl7xa zAOVo+F-Cji_CO%zNdPHHivw0XZgD|oMI3`gth`Xu$><|rOz9mEuXCd|36a@;1y^9O zL6?uu=;i#>@POqAm()$;!JMfliMxPUv5+?}_Wh*7a>NMZnRcJ=5*EhZ1gz^>HulrR%e0ejoIKfXTLrZ4M3T1DN@# z{aTwHF_=Ab zmisDGaAG$4VP`U9CBU=V_%J8WDMUDhf_0yoHx)G_Ur6fmuaK)`sVHD%A9FZH(XHWb zwPwf^rM7Xa{Bce5@i9j`*n z=O6ssH+P-uGf;_^=H<8PK$T;x|3q@nK=zc@qzbZSm?^zt>vcPxO*4srNU3Ze84RQH_r|t?w?@^Sls-dfAd2{_wT>?={JBb zf62^@3TQvafv(KQMnvAVGdtfycx@xHZjZvR6>2&lgP^RflmG04+uEiEbC>!{B+dTt z0C`lOMj!+3Zr8jb$>?osh}dR6uxt3KUtk2-&IJTQ&4cV^5D9Yxbf zLnCH&b+uX>9@_c^LXK>GlevK1v6e<*?2Rd5tVHgco3OZ^vc-|+*6 zeIT|AP#@tQl`H-}-7h6&japB+c1jd(gPpRTJ@+cwKwZi0tF!I*8?JTb6Ax?@6i6?y zA4fQKmC5+lsA8rOD>rnY`cEjU^mY^7Xa-R26na`n z&QRcTlXYXPnJrcxEeMU~HC6H2rUxJI;%ld)bE7(v%DG?;zN-0hApn~7AILAAK zOS=J&`9Vj4VvWC>3oGDJm&Y){t(Ji5w?PM`Yl>A~8kW$o5X<7NA5$EtdsPtJ|2l_O zE=O7BMDOlN4aO4MRcXh9mTu$P=t8|CvkISd8LL;9P;8@sg)fyvKfD8AKp41Hu5yN+ z?v{3gUQ$55%R-vC*s&m~a6LP(QWChk+uDMg0@*Ga26Gji|5SDzYcnSoKIFm-SOqike6e`eBo6=b&`8z#1> z&OA8KO1!`tjMPYxlS+iZgP-2nH0cg)>Z;;c6RSPr?yN3d#;s>ZL;70FSxZuxfaqA>PtAcWQ6U)Og(hH7=$RH#}d_ChOzuYp-k^7cY#% z0h$Pt)9jy{7h8OzIY_zK4D`~sG;X#ir58?oR3KIa6p_X!aO(5%EHvBowvB5m6?=}V z;;2fad4}eF!WMTc2~dDapR|t{QbzCpRBl6%6Cz37*v|{i&W5*Cs-bUTOf4^E;IRTOn1G_+1obl@Nhz1penr!j$Nij(iP7PT^ zvnD3=$yZ+9<^tpLxAT?Ul^A+SHjyYzf7Q;_-UBfUqod%DHO2~L1z0qso#8R9u5fbZdQXJ1B|>^@=G74cqs zT5%JojSo6;J^MeDo7GYDyU3J~FGcd!YV`NM0@mmQ(Hpr6;n64$W8Fns_(e6I{z=QE zd3otoWFHQ-^V~7$?J*;WY==Vt*TwhWmY_)Bo5jS-Vrl*X!K;mqtoD&fMe3)hUBs>- zzP+O{xY!o|V+IEMqi=^vMn$P~CmqSNL~;Xy%jnrL6;-&V6eUqu8}B{f#?dzHw<@3! zz;I1SWVxAnX>oJ7YffDNpmS|I`$aJoMpKzgIu}GiA>2-d_U5U@8 z^i+goOtvpqi+)jVnfAQB|F{3mi6^hnO7m?T6ZuAB>SIezrHNGl4mTY3(gS(CZ#6J0 z2n$zBE(=J^*6lTSDllo6)9#_P6U-<1o}*e}=Q4E-C?UBLF5P0uDT!@IJ*v}|a2&oV z2mdTGw^k8zCAniqBTl?;zzcq>5!%`oQX+gFBqz?rV4Es_Gnjwp+S72E^SOA4?xowE zYNxn?#&@tLa`678F8`B#MKW#puBM=!Tfo57t_L8QF7c9$jR&mnYA~7b8Mt&N32%qW z4+sE-q&^sbKOomJ64o$yAUMl{!Q5wfc-8~5fYGk7nq4&q(_n?qRT_-c0CewD4V5Gh zzm+6HmC_biAP`Y6*B0};+0WUwOU2Hz88k9@AVX2@u#%S!)hPe=!BIhDH40P|#eW-- zUv)hvk0|fDm{&H+>;8buUy-`hJ2C_!b@chq<$_$ibr-+<`ss(#tJ6juZAl~x*~W-*>^YJ0BO{gE6CP5|_yoAOdN!4-=IyLcK9zk#WyWE z6xR1uDc%Xr3jF;40`{MP(oyj95aie>wcOS=Lyk?is4r!M^lD%c2Sg74^!!C`?t&dE zdR+bwvZXr{hf~ImwG$l=e%;6bMC6-3_(=^+vTu62a;1dpC}SG{gNWDoHV6g-3L<6yU^7_M z2EkvZZ1)hy>`Y4!fXEQ1V}6WOi(|ZDBAfXu9^K0sI`bfEF=%47*#`?rTX+5F>QF~T z3q1Z#3$QDWRlRFyv9+U$x3u;WfhP6B=)<_7Kw3DRAGN&o40P6??Is7}m5h4Zgd z0!wJvT08!(*iYZ`y~97tuZA&!s#3PXP9I=`2rhT;kIO}YJ&2%susJXe?zWpLlIR9m zG!~ARFP|2~j6SiW9vVhM>+~;2{*MJRtEA6AG3k`o8;KIfCeVkyTMEph{&lWra5yt7 zuUq9y6J?HgDAs3$u>iWAk8W(*k+2|{sh=DLIpegP2R+Z*?md<_6%-YzP_&s~s*+Fb zgHhks@p2FgltG%#1^LTBCsltu^%P>eBpW-vL;cHXf6!AF>|ylpqkP^_=pGQrU0j?2 z<=sOXjh4JFWZa?$mDMF>OY&0H!oz2JD#HZDOg99pt(;v z2_rBVG@V&cf&*#69N;eV5IEH(q8Pz5P3eJ{t-k$6InNd37>W)AQkJ(M^|eU> zY7M1xO!@-mNna)=JlIQBAJ*6-&wjixg}S*~Nd08xjy?X$V}^(d6JddhG3V;;ZY0{> zJ=VI+zy6L;$O+Ivcf@k%Og+)19)09=_rRj@RL0Ix&|@xyJcdG+2ZeT6);)J$0gcB~ z(wJG`BO_EC@w_Y&qC;e+U>e9-sb@2nn zHw7*oV#Z=)=s~h>*0>WUS+{2qBYQ~T46%l(9Uxbdlc&L1aQ=WySy(GTei@5rY}$;d zPod}-iL^`?wrJ0UYKdz5v>`%!5|fp?1X_`aeuWlD2CzDwW@V-(u%s%Nr(gJ46*+Y9 zq~7OpIKDm?3~cEuv$=O}eU+*HJS;)$)4GpIN(aWvnN6TTzv*_tpdy?TIaFAEXJ2s8up?m`2y=fonYb|>%TJa;-X@w%!x(x9i^UGdv$kD#qh3DsUjt|>ff;CIX6B@M7VsuC^bY<_ zOgj=MVR2-#k{dFx@eHQCoNd2mTm9G1!w&q_2YFEr$-4rVvC(}WOq6p>4U;A?sdwXK z@BiUM0?1JLYuZ@DZ*({#q$8lDNhti+g@75k29(qP8h^mI?EQaT&Vv%5ru%vX_)o7X 
zP;vKT*sz5`A@1G%Xb=!-yLOgGf1ZIYk%CIz)%eXjtx3<;j(g&#_YBDI_VBIAfJ}dx zO>f$RD&}7^R1EH-+gE+XPlpGy>3y&q{AAgI8U zN?R~+hzBo+j)KUs(=}2dhFQ;_6UxeZ8tK0qW-jGCdv=ohvH=8mIsoB|_e_&wqTyX- z+btCkT}S6a>eCD9z#QH>xx~I^b7AwFTU!XZxgPJ?&lvR0yVN#9hEr>9_8|J&X>2DH z>1721zKv{cTkcZ%zltq?zBV_o`DjjhL_owfK!QbMN?_}e835q;rfiGym>ulQu|&~x zt#xXWdGR|`Kfik6*Gn@pC|cFvj&R=s)AKufL#_y*7l0X)1;U^bk9!Q!Rj4J!#mdG} zAWJWr&$C?Gli&iH|Np+JZs8=zy-SqooBsGV)AxbNQJ43Sj81iBw#~792&@mNT<)_T zx=d>g4CYxjuAt;Ka1&Tg<>PQ&wXicK?mff0I-316K-H*&KCte;23-9x>mw!_UlBCl zAP~@Nr{C83)G)9A+Vqf))4elBd+#mHjmxIVSFVhEQkNzxQ{`M}uoX7~!9!~AEhD6h z1*<<)O8JOs9-1gJ4#_cfZ&|Hb!TvHZtQdDkfS=niCT8`Br0Ju07jlYV2w0T_bX7*1G$UFjBC=3!aA6>DAe#zY@)WU)ZK;AC&Pwg{2E(m!nMvO8DW7VQ z$*Gkj|88{6CSCx2g$N5?k|#gFMWq)zf>+~hopaaKD}fmVIUZRgJgPgPb=SH>LTRX~ z1F#Oy1F)Y@@BwbDEr4f!2a}I#QV_F)ikko3l zf@?L3YVUjiS||(qVSLTb#1N{psE&)PCde=get_HNL^7#Fkz$S($)|y6GIdSrZCPYi zzsxVE6Ha$7^<~lIfRNO;S!u-A<3Qk)(Ca#(&|u{+A8v&0yCIkG>$$lffPuu0+S&X_ ztOU%mINOIhj!DI!jY3+;-Fy960ZV`rQWC9~@16 zLUpcyI($BTM0zG{K#kVP!iA*Jx$fP{R)zPy&o4XAgTby1XcWT=p2Bx^Gn=8cER_je ztcAO(ryG>07`S!nPn>f|p`z@tQzGaSXch7zBUUFz5Vv^sW{a2G`km9zu*N~FbLxIA z&w;8YsMv!4pYDfyAFWezxVFpbwaUB@A}m>BPW0xrH{9yGP}7N@>Xaw9jmZrN6Lrwq+U`Ju3~gO#^by?JKPCD%Jzmo|x^=!lLx$Jk&-p zlKGaM8Jt&6>*UvUIy?YajY-fNx@qRFMCe1owPeA;w2U|d9c5fqIcm`1O0M)j*%OVV z#WIp<@Kgl>^DT{SFnvAlegLUaAi*rmk4eBB#(BTcphhm&d&_t?VyL8IVMm$sW=p` z;4-s1RE`D|lY9Zo5Wj=*UdNuue~x^-)A;Z6@kW7Fk<+$~WhE`sg$xy?!|@86G~X*x zQ?mxA#-!uoH^pQw9vn$fHfC@biIRM}`$kifc=guRebjJKiex-us8rqu^ZZ!gt&K|% zPk=>KUtPotCoE-E#!>seGih~RRr2lkJs%FW+QUKyJzrZ$=eHMK9^F(hsa`cH*~iPg zI1TFlQ+?{83p2%CxM0lTB^--iC%uMy` z+3Q}rJQq6Y?ip;uyL*tgh*}E%-uMX2T6AVy1_pheEm&MFdD_^(hqR+=8v?F$hkvLq{CXgzVEoL!-D!uxwHph$b+-~)A!P(@Rx4)v> z?1g{6-^}#y&{R0+mO(F4rv)Y)eF;Z1Epyl zl1h!1lqlyS1uuxfOGQD8omy=t;uQ|9t!$y9F++18U{|VllXeN6{I3-4`HJSy0xj2f zm&N1`Lw@zgAeitI!imjtJ8Eb1Ju{)}>Q+7$$}x=2w3(xgn#R00BP!hF`r0<+U2U1( zB|m@Iy2#0l7Xmupd=k_9^)7ZCG{JQU5QomKCV<f6fuE!n@+YSO2pV!xoJ%}Xo z9l&(#c}sP&JFr4toMgI#TtDCj8>SEofB)I)aWm{}cb0N^NPb#40mz3r#4iNIQK=-D z%6UE8Ir__%qlbZke>Q0NHlg*9${KB9Cw5B2uy5KmrYny6xi+pc#T zXbiwM?%CM%t{Gc9_p0SjM<}Ml`MJ}|LGxP^hyT_=+ea%#b%1|nt^1@U*=uW>wzt!W zrV5mA=_oEI6&_A>&;)( zP1W`H7|~RQQLI-->2ZQ01aD&9h}i;gS}b*Gk9~Q;N~E}_QtJ+R9$ZtfO4)Wsw%_4* zpKd`X^_Wy`))@Ci_bvFnHM6V18ryiJwCNaVo7ZbbEoK|u=?Wvw`%004sV_fH(&VF+ z?zoea=x7?RF#F!CACI*@seJ|kG*fL9OyZ5!6s2`ru}d4GNU|TVzE=n0Oe23UhScqt=qpq-i7%d9)A@UK2QO`CRjHhaN4B*hGpv|=SJfl> z?n$tS3Tt|>b$gwoc>KKT&DUeT%h{LucK{QZ5aSR_;vI*KdT>CHF&dU zpLOxWcMZ1`vPr9B$YeRpbh}cjFHDv-SGI}?gRn`*1{SA6UF|3sXO%OLetXxDppad? zI`dc}F~QpRt@cv&V%7}_EJ`ueT+ZtlZy~=XjqP^MSh9~%G&x~LdVN4~BZJc#Ng4?3RPHS&;)CWjUEA)7mWc~J z1oQ0Br2qlVDl3awHTz=}YcXFp!f$;n6+?BF%CIMgb0&Es|6o?A>+@%#;IKSkC0Qt? 
zo7ox6mwy%~FJWW~$K{iFnvJnO-9KX|nWf1L$k3V^kY5Iq`}llu{kq9(4rFj-HT$m} z@OX9YK%QNC*u{Q}5_)6pQvA%gWEwg)n$V3ihV-9Vs2eXh>WT}wE11702xPmpOQWluqv@7Z&TO2@ zLieTglBF~@{}>+?wmwkS#3e>DiEULg)26k1r|kHhq{e}e7ycXd4(QFjJ4F4bC3;es#P;2!)A{xG($h8-$Nz({mpqLjNTmfP%c_j zsx|)Za-}FMI6FxEmN{0)U@iWWCzyVmDbU9IoSh!L$8x<1Jb@h1Pqo359jpqwkU7iI zlr3?i1D4BAFxO~oHX(0w2Bx!Sx>MA0g|&ExM9u79{Nolg|$sr|t8MF4{_3 zs!lEqXteL(?Qi-+X3X%i!@(Gqc(Z5dD7gneR^Oe0IlS56M)87g*_>L0vE)GQe0gHn zjhbZ+dRtlO$hh5jnpMRc61Rti$t0u&R0X-|X9Vl?*wmSOt_3jUC1w&+JslRR+y z2W!TD)9lOPMh4EOP^xAH9jUu=IKQ#XeKKC=r%Z?AF&w^J!V^>4BMeS_%(s= z>nR#@5oHB<()To7%rkIOXMN4v9mYG1!5I-N{Izj&s@62``rxBRyyhq7Sciwm@Y~i2w0wFm5(I_^j3|xMr=x^*Vw?0 zZjY0WnKmTl)Zx(?j<9?LAW%xMb&n$f(WrZYS3FI4`?>LS;TmtW| zE(f0*7DRX75LOdltL>HGKF()y*DC^W$Hm>t@q6*>j}G7|@T_ zhKlM*_|xh$^z>mRM}G_7{FAW^vHlU7jVr%+t_QNYZCy2UB?zdQADhqutX$irw2>Fd zU2utI{dQe#QEhr%Ief>pb+hioOId~7SgcUM!rEs%&EP-`knT4SIPw`i5}?Y8#iWM| zm~R+Gq$`y6tt0YOlzKqs!pcFCBp%baggj@`tQ zu{`T*VNO8P_|QF;Tnw~|`;bh{@g9Fk?OJH^uQ{1=3n>DZ1!;=SuEl+i&Kc^A=M}Xc z)t$UKRVxxU<*$cqH89o*yR5Y*0Jz%RG04Mgq*~;j-j--ge{`mr3uMtry$f$tAQw8t zgI~E21m*amdI6;r0OP?jr3JOYUYvwMCGbN9b@E3cI62|}TFDdpu2HRRVRk%vBpRHg zwraFc?+$B(%y^2wL}t{LSuA<5+!|M)QS8o^i^dG+`*sxhzf6nvND~QeezS}!`WjL} zZ)V`yvQKQ^0%wRfQl4Lhja3HX?_%pfFt@*&a&(s5H0EqoT|EH>62X?kye@~+yYjPx zn}+<_%rxEKEi1C;v@+gJdXj>P+g(eplM^_LPPE&p9csrHwbxk{+cvI?)P`XeH9DG6 znVX1DB$&lI6{i4<$uCuF(#p`vU%9_jZs)RWuwNz7IMOITL(EVNlJch3u{@i zE6+-8a6V#(GJ8(&D3ubzQKBA}%6J`VFmu-j8r^Ib3^cHgG=68AaNnG?(AF>U`YL_t z1#`)aCj%X2d`6xP?3jF1p5M|c_SQ}RPwSc(X?}BKTmmOayXAZIkio>Gx>g_uysGe> zp$-NCkAExHzdv(iMG}(VJF$*+7Gf!={k^T{rIa;<)U5VI%KU*%v+-+={sCL^0nQ~g z@8QO#{1d^+2BATG4fIE6RVF-#VAFI*BiB2(y~ZI4Z8xs6DN&(Bc~ui)a&&BIx1Cjz zEs}H6j=KRk_9450*%y(j)mEs3qb-U|Zj_8IT5cqK{RxJur4cZ(mwHXA&io4A{@z2} zg8rFggYytN++FpP8;_-2mdY>}zhxsG@l6)0opD^>2;ecuysV^8OiLc@J^8@LXYPPRprRU$rO1HD2#$xMlkdvw{*Q?q}qUgr9Tvy1JUxf%|>KrDZAg%`}%uQ)k*;XF|ZoP zO7GctXUcw5_2PmRC(o2%`%}DP@Y+voO|17bW<1#`ws5G5={Umt6o#R3zn9hM_Vtx(i`v>re6E>Z$_r5mEdV@ zTlAfZfB6BivI%SR$K$oFr%3H7>Lb&-tt>~SHYG~FDH!vbH#Y8l<$J3aDt$+v+vOj% zt9V`7LpR)xC^FMkQJ8rA7^RG^8A>ajikYRpqaT}}@pf(7RHmQQRBj)(@cZYI54JSb zD5mBLEFCoxgR|p1EpVbF z+eDSDrXG6g>7}>Qzty#D|DwjdFOd8iuxZQa^^&k8!z(XHsc6lBa1=i`8(8cK@ zZSKMhi(;ozQ-=D=vzlPC@GM)>pDUU!BV9|tE{;3clKIpnVVu0wTfvxf#atO>8#g(y zUqb%P=9|Q;LE1V6r18aNO7avGG>G&U-3u7#!jX2ps3{ljYf0C8`!C+Bk<-fuUTD3G z>cJ~ zui565?_;k@{Kxm6(ReoIxOj4S^Y)m_Y7tHOF?WhDA%0Z+s`LiuWrH78KrY%R7I$UW zaRWNHvsydPd~4iO%o7qXYRWwvEkTj9+ZK1#K_TXGaaFZWim>?)dSw|&IQ~Txqd_b^ z)#MAr4OCvud=%0FIL{^QrM(e!n37Hf_jQIWH&jMJI59LGAIR^NJGtY6?SyZ!I3@{< zG{K7|mC7s9pKlz#tgd27ku&}4V20#iJ{pIsLZ9Et)eoFbq~z-R?coF)(d+}B$IR9_ zR?#_y+mg}TSc$RhnzHkND3=eFibWS0v4tY5nE+y*#K7p5^|B4n_z7|p7<;jI;#p7G ztZq@5>B6ihZnCekg0-xq>DT$PEJ{t4Qtj9Rm02GcF}~#HN^Ni>CmIA~#UY{F$G4?9 z6#rtf;!uB(_`e>+|HfJUzw^ij@R4M1%;~VAUotl@Jw$doYHg?8es}&{{2Pg~CtmJ>VRUwk__ z_bWt(U^qydRt}Ttss;q5baUfMBKW&iFF+0s|ArMr>4O58t#h7-49bJ%kYTmaE$M=P z1Z&%Q|04sf%rMtYcrrWD)P2hE}@B|Q(;hn2^ z0$f<<9-x1;Wf{z77XxFmDSv*&)A{hD;EL0{WiI~ynM=dXy}#Z#sCxTTc@v$nhZ(Ivg~U2x zEv-^8fgKi>nag};t-u<31eyZiKiOx)gg^`z;yIE|3hOC$IXTj)w-RxEJZd^^svDo7 z9{vH{BJaqB35J^Vvr%`OAheoV&P3_3#}oC10ydTZ6`y*^7j8QFPf4B4kov>p$>;-h z$S(RIULe!Bjg6~vj)Qo}XL+_uQuLKP9!CzsYgnt;loJg0gfc4#F_sgPHbQT4$jcLkYQ?-h!jKmU=v2pX@F#wPx>?phBOt|z4ELu*1V30W)K`{rnu6v1#x~+OObGRM9C4U4$f9~9 zS+LqJ_iX9;1~nyfJzl*-pEN^FnZfuSZy%#3hfk7)Pq~ECv@WSQHRW^&$ymx8W3^aU zzKVNnwqC2a1{ITeDg)!hw{J9oH`w%5a&$}XR9#>W_t$RMYO=kJG@qrfY_2<5yp#UC z+^uSqU%UiqKb_ZFQi(BiZ}s`K1SD_!1j<*H37GfvbW7lgyx7KF`Zgt_>vfJM6qdIf1=%s{!`4@0ZeX(9};Ypb5q285x4 zas~%@pyJRNv*D>K4au#0uX&%v;NVUjW4oEodUu%CvG<+-^pD%P1glsL;&5iWrz~lS 
z*H032cm}>_;hOJhPjs>Zd^E#ssm&^JEHZ~+85I5Zb+I1XQTuiM;v%&>sp(+bc@EskplbYe}-P%yawg1f?I!`qVeIAXI0lk1PU`~m@?_J19e-2iifJP+(iW66#y3{@13Se{C)Se9J5I`=$@#{UWsS@Oc;r1vetQvS_ZU~-Lu-TL|SowztcTA&keL1yVm%+59l(#9fJ zr$#S-igy(M!L_U4R}%uXwxIrwlSQ*Jn9nty+K{*)}509ndFs%dhzAy$~TL zZTM7*kJ+l&Y}WQQlqfy~woC8}5wd?b-ycN|>J)B=TzZwQ<_t)A)*yYnKiCFN>WAdu z`QrQOpF;j#;;PoI<|}GpH4xO^D5X5Qdqvf+g}LF7xnsfoh@+CYfO9>7;kH79gI36{ zCZz$E+k-a@`P)$$&6_}q)?0=uf_b5CTmC#^FPE&=v}fZ}0il0+*DSE1Goe`V*5>(G z|EgVQfqo6e=RCd_*5tQ%xz@n_ge>er2KFZsPm*+fyR!AE;3Jg33`iMmDQuDmdmEl+ z%m}d66G{jNC0e1r;OW0Stf6fUR4m2iw~^Q*D||ltU$M|BdS6qDlE@%HD;VKoU`6k+ z{#*&(^3wD4B@;fEH^M;IRs{&h@Qmw9vGd>%B-YGiqJ87gXv@u*pQ+uKIBZJ_F7>GX zY&)w4X{xbThzlKPMQSawg1ccJt=0Sio8ZQa(D_Dv8TwM!UE+?QMbqeLrdeQb`f_)A zrj2K{Z!|q2@g)6r3^9T|xLAjc(!i0q9p zOECt^pwUp`z5;Kb^O$1M~mu!#N&meoVyXVTEVJK z_sHB`HR)sch9V)-^w^UaF>;aGybWvD|4!nxa9=n&#)IvweP+p)SIc}VCU2`pryI;Y z{t8RK_-R_eOZ);fTZoOS(^q;d(-Wcg=2x$P=&vYM`8w7#=hON%7hLP?Z2jc;h!GkE z_AT0@5^5E;0q<06u(V}QWL3Gko=P}H_ra-U+0&$iFqCpGYj#VsH@F#@P>-}-uI&pX zD9K;T=>K4LRt}eTTTAfu)ZKav(M@kynkg`v#AMzbuqlL`feJO&w2&=C?Ux7o*i0I~ zD(g#9ft>CMj`t?t&x0()GkMRu!)vF2=5G<=X(MgGxJxn1;9QItXxWe0-c>R<2h|RQ zqr_wM$o~3+lJfhvBwnR28X=3oR6{$aQY{a47U2rNh)j`B#4?F=S!%TQR5Y_2im~1lj;0H< z%dxT15|h~mH9Au@NQUPNNtFp)fgF1n?!To7mzqUYD80xX%8YcZoCpdL0C{Q)(8g)a za&@CXaoayg8WFq$hQ`^YDF(Cd7+2J-^rw}JNytRCL#OqpK0Pj3+wj)pW9NW&s1$Jz zsAitLB!Rr9!LsfE>{BNF}a{g~=2j7Ca1D~d(< z9;=}j)+xA+&+G4We1q{w^Jz61mE{H)^Ii!?fVW01cIz&5$H~b#Oe`VftEsNob8oL3 z%Vq2PsUs8Th7D8&QvYca>RzADKCu)KJC>xqeMKRij!mdQDSvh;%&Ax={EnRRxo=k- zChPudT4|v74`S7@~QO`pn z$%Wf3UgF<_kL5jPByhgJLZ>W=}I-kT%+H5~y<-w=!#0E?#p>BT>i0S5CozopC16>xOU87Yb< zn4WbnYuHhwvMCAf@g(e%+J7nnrNs6H8=0K@$za5NO;veehb4|EVouSVo2rchTivPhq4t@Y))^@`}n8cqt`N^XG6t7^S@yRRv&jas87Y!~$-P|IOsk(SH^EX#ewn0t317kpKVy literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/modified.png b/tools/Polygraphy/examples/cli/surgeon/04_setting_upper_bounds/modified.png new file mode 100644 index 0000000000000000000000000000000000000000..9225c118bf36448aa98d01da61dbc1c0076be0cb GIT binary patch literal 110733 zcmcG$c|4SD`#-LP?y}Z|EGZRZ9c%WbGTJN+vNIw($u8T376z&8veXEXCHoR4*|L+J zQ1*S_8S}fY8M=G!=YE#Y=ll8n(d#lZ=XGA^vAvJ?ah&H>z-^UlRL7W(k&uv3-MFrx zMnXaXAt51`J4#MMa%g~hvm5x~kfYkQDwxEaotp`0MyUK`jHF9&yr<`s&$sgX2PNA*;E{7La~ z>9ae6dHKq_q?{M5Y}lc6D)a(&{_)>4TK3VV0v|W1LiOqN(}3 z=Xi$BoY|2JzU?Whi6cUcCw(m^-s=5#SJT|yyS5VHm&n>bF4WX^tQJr``{Jy!k@wzm z;wH+wNZE=Buj4l5z4iflM{$QS=#O(H?RgH5l(w(D?w+R{ANyf>!qo%kAfVVCsFLMT zdMno3@(EK=?!;gQd7|Oq&V8qyt%{zVZ`&W)UHl?8GM}`d9iLjZv*3CM%ugD|hwEn<;A^zoDpcW0ZOH zQL?@}x+02pnDkvl4E34x^n*z*Z`>2$IMUvHweVkES-LBV94Ax+1q5z);Bi>I>qm|# zmop_}MN~E0+wLPIK|5nGl!Ti?iQt^{mJfnYW{=_qSC*I3+Dv(StyJ~>ELIdt-&7Gz zO`)+LY@VTf@yQRmshYf%VL!yWfureO=-{?tzs{niokd|6WsMAtslxr8VP7ApslYb) zoFvi6RKPM5`IL%m;4)bXD>mzSvKT3l`isC5of_GvMm%W8pyV%O8G8pN3NOQY1=eOBD} zFygySdKywYYq;q|DJT3$?s7tEVgv=QBxAxLzYLf3kq>DOxJxT(V&6Kh)J_|@O7d#F zZ-Qnfl)5zgs&c1%jL_9l?_G@|n+6^U!KRrI>Kb@L3>|;u%*?~$_<7a9W#FrYJVkSMFPw$9j0qmj~4;-sogGF@i#G>v8x zr*}dz2i|FwDr*$6Ft63F9IVskQnfH+=v5X~x@_7RfxRf>nu%0ajq6;QsH#XYOLcK^ zE}W$4mf}3~sW-uDqR#o9I!E9_!t!?W>f@REiuVyWn8lui`~iey_!HM35WpB#mb;&) zhOA^4Rm;kil{7&z2Ve2(*~?Pd}loxmjt%tK#&aLwDceKrGO*ofTbUz<#!xDQgy$+Mbz^RugRh z`tgw3Rb_LBKSKfx#uVO`Gl+Ih=9DL1*Y>u`cO5jc9F-|dYE$be>qSnmoz2%^6M!X1 za+VZICwu>qxLta=YrTEXLNPJGjJvEzUhH|lb`+z;4{A?vQgcsicB zy-uCuS;(!#W#`N(qmK5?&*+U{Go}EV7rJRQQR7djhC5U*1>Dann_83*#Hk;zUdAq? 
z&c`&VsQV$Zc*6bK>>7ri$Nynx@U{GbfcxB&5B&;`T6^}E8s*LU$$tYSR%ug(C~cjx z?qV}PI{x6hr0<@_G>?L2md^CcUUO?@e4>;rXpt288wgoTGBOLFHjV9)*eW;c9qdl; zlTaNz`r{-~Y}?x1RD596mm%-^!o)h{*RLC4ibKnn`j|`Gw91=D+~0{6)cw39NqJq0 zd}L0`6z(VXb<`+qCA4hnN|tKD*pJZqA$0woqq*u1YNJI}b2u3>UJv$>af41RM$ zn@*#5hv`9wI1m7YucuTmn5-m2O-#eHa_?M|L+R_sg6rOX)P@avi7l-w#BmwM96(!Y z(i4hiI{W#?Kx3S$FS0dO>ZHz76EOIxYo1?Q**GP&?s9~0LFm%!g{+2Axf(&L-=FVn z%5p6Em4VmgN|W`I6jg0+;^=<^&v7A9AY_h~aPGrR(jC8-dd$@zApJEXG{mstu03Z# z_|2&)^CZVTyRVa*_^G^nP`s#KG$YEzuzlRH?b|3m|MBYT)Zhc1h4Ec6np?QBt%rFn zF*J&TJ?C5orj%in0mZ7bCs;Qas0%7aD5_q4atTm!$%sMjbZbF)Re^b3w@1&}FB@RJ zCf~)5QzSY~2PaVVQ!4JTMplSBwX#`Y?DWJLX(+d%1w)Ik@FLV(JaDc<_k>pbOeqze zx?@-e05G7U=tzpvRFa|@$Vt9dnEpJ{N|zwfhnvbVJVO=)I0n>i_l)QHpSiiSocj6L-t92UyceJ`Q>T8UJ}#cmsM!>?)?<$IC=t_G?ZgOtvkng;uvMDde=Du)Jep%w6kt{pgT9om_f2NY(QcE;3 z#h5ld-=jF=_U1?7JCn6;?4I+-YNxw>4<{o;nS+vjL^a#}K_xwJFbYFjg!aed^rLO2uTM7&<=Ur(e3p{-|4~rTc##FrMUyp>kI zTGI9|N*}c9bIMP(pFnTn_&p9u0u;o4CqGS&kEAw75=?@jzCcorRt&&j_XR8=&tPHnVI(Mm9ue*7ih0K{o@yDR;bY&QY9Y@*D9B<1V zKqY>@CfD^odC4ZaZqrX=$uN>BNJ}Y!&8f&|f9Sc8KZ63eSCp|hAHniFvtsE^#1DEb z+#&rKMM_Fq5n}{8%ZMbSGN}MR(f>~Q*)O^+wJ4Jjs7_v*kjrDz<)e+8f?c>Pva@pVYp_(Z;g z!1U~Ue7ybKWzo|$HLry+#hHuP7sb}hesm`-hq*_7{4lD}I6pDs#ZT~Q6~p8bf{z`z zCn#Tw_CLs%-rFWO12gmYko9V~sE0(d*$5hdrZR0JPqZ`&?i4Ww1qKo^QE&l9ykhKD z-TA#Yk|-tR5b8gcX09@8>^N1Fm=S|Q?ess^0DyacNg}Mqg{o#`kGZmVNSv^M^eqGo z`NVk%B~F{}fJ?HE3DZn4?{P(Om7~ zfZq3R7Ljj(plfwG3Akj`LlZ?j{IZ=@%CjWCjoUG=5HzezaXyF@lqbE{NCd}j#(POb z33B_qFvjdW@uS#7wy_ANK^ z`LjG++F|ti0lEb+2Yn-2azvVu1}&3a-k2ihPh~ecF|@6DwZg%Ypqqvmni{2Xp|%H| zY}d%OkUizIiDH5Jg>sDe)sY#E1ePHD`uCw{GuMg2bIc2^ZA~Q56h!e8vJc2rsD2Nf ze^s3PAAC>J{v0TN&T{+@uI*;IJkq&0QRNR}>|B|;i+4VGJ;A}Y`U7QMUYY07zy?A& zRne}44>@Da&vHDPq~=|G$D;4>~nX@YdEfilOTzB|e% zie#8H@!R-NUjfF*kXgo}%dG*kBJP5sQyQ~wlvg5YyNsaPk=DBeyFF+T?*QUg&gEMWDj0A>AW%YR5p#xj2XCB$FVrFQ95aZ8bV)3onHu&huouc53uLdyyX z7kXW$LRHmit+{DO=_ZChb=Q9FkmGu-^_TUC{xMgMOv;I*WvV0YIx0;qA*&M#nO@#S zDtQ^n6Eq)E7x>cT_{1ETb@S5stFKJKZa=NiRQS=9^Q3Kq<$7kp zmxnE5apqsj>(q}$a-}Fy?~ID1{?bO*I8BVV^UjrmHThR6&CP0Fy1YpQH9b(@`KBJYSQbAI@-bCs5jF~7@cF>GWb1k%iji{uH{B>D&N{Uz@o zUw!gC3-N&wjwy!py<96*b9&FKx@PECe39cKu45EKqi=xfXtf*_fi0bK*u1NqTYE5{O{-UW&j#R69OKv)tU6zUlClKJhXauY}IfihO+9W!*Er= zoA5JFse+iSB?C>y!mx};Sz|zbmBLio{e&CX7M?J3E(L{2!)DOjOW8Ge2{in8EY$TY%v+Vbdsvf1R z>SLCBu)3q{p|Z8&&6b8A03LG8P@I#FQXyhO!*lKqK#VI8;3!sakQrPM~6*$CgO?|q;^Il$^#k^)zjI`5_Y+N097 z^sDzkL_>137q33eoTDfC@SQ;1Wl^^dB;$8x8{CF|kxGb|oKuBf^=s;RgA?m1bnVYW zr!Oa`xSy?GpZ1W``)IQ^t|AJTVvY2HVl6uCkZ;LjSo|HzCyI-$wpTB|CCBJan4zLC z0qh*aK+39Hf`mU!l|yXS+IAw1fO=2dslh2C>hWC=GyxqGz>|2*UmdOv)P|2)+ce3? 
zirZtNBpl5IC=kCH=}fmzk;|O!2+Z^ytaS0pNBVtjh2Qq3DomBLt?zdGPCiUZBW+YF zLwCv(xYOMQZFm*6_eO%HVCeo%sn`oM#R#%^Ii9Y2xny3?hCcDlILAfJ<=D z=QhsFK!_BK`ARTSf!XYx^u_;*$K;uvU0+)xtxcldo8aeOi&=7tp)1d z9eLjF{Rx@<1zdhtCoo5L51X8+rt`rH4zl!t!;M^`RgT0&E19(VLJ5NJ_<;4G5^45J z9}A+oglPNw`~B#H$UwQ%>*OEbssz;taFNgd?wY~kpW)gSVCyk9#J^oAtg9u8E>ViJ zy4E|E(GJCPk2Kp}4xwf3PoNjG5$C2pvcgXYfG*7{rSW5;v||G`)iWguEiU*y6G(dk zys79`<^+l=jRy>BH|Lly@{wFzAD0OLT!IjlIpY7`GOhpz&?S{&2g&OgJBnc!# z%P=7&QUPwD6a<5lrizDtYlCFO@Ah^D1>1X5OXAI_v*dpBlO9*o5`2K-2M|4P`&-yP z*BN1|Qu2clC%Aack)Zd{Zz6{Xq63NI_iK_UX?9i?%;GX(iRr!ar4#Li z8dbkyWEsY%pLT%gx%{^_$!` zQLuBUyX}n=oe+Xq58N&+{&$@FEAIXCXCTVgsBS5MrJ%_%yC(3W9KSa_Uu^up#18Np%(|EQBu9U0>#+A zDuIcNy}aPc%mlv=_0hhe5-U!Z_sx$GRh9BAxS6{`AJAEq@(K(YQ}RLHV%{9>jC*{T zUFNDul5kR>G~7!~#Trl=oy7!l%ztwNxErtwKv-iFIc6%7-((;3hq;aa0dIb}gf%(9 z1Wr~rMhp8%_!D7`*}p|*BX%hgHn!Pkokg~v#B@S^HUp8nS12hde>1MC?AAN`+rQw1 z)jzt?bWntIy8uQPttvN7lNpIORL&|#J?nY7LeUS<^}U?WO5>`PE`-%I^l4V9+oet= z&QVmy8yQ&+xeu~MX`xi88}%!48f4uKdQ^x&nv3jGpy)~h`%45g{)_bwyx{--dWie6 z<)d@PIq#r{HIBBET&=oy199X%ZStX~{&lY~M{l1~BBi7Yy$Gj!b0vzLk)7ifnqB^h z0F{73US$@r2Z_OAu||6Xu0wp<*KUG0E>(!W>b23FD%5go#|^H|8*QD-l9AHxdY8;o z>WuAn)fFKN3HZ>xQrOFtqs%wFIy2L`I@+8xTvGdK*LfqIwwJ;c-7dq&{wYU2R<>OI zUY0(eq?Q@{ma3D-cJUa>@UGPM%5sC;ryRc6Yk3GA$`Dp5GkuOE;_7*l`8m2Ds$-$s zUVW49ZL&I$k(tezEqJ{c6usc`5GO{~U=q>}7jz5aMYoMDO)a{VLvKo*pPO{|FMtOA zlv*fN7f5IrJJ$754RVNE!Slg6Y*mW;_OP;#m7jwD|r6fxKyz9n#$u%4e4NQ|n8Mwh4 z!E$@f0uehDuilQUkjC&$n?!?l#lYEZV^oCvZ>arxoTa0M#aB1XWhGw*e*L2%rl6_) z>HknrO8zH_Xa#jZ75&wBqDwHFQ>rLkFou*!f-N7NQperhy~WePR<*OTWw{WdTRJsx zH$guu>6%>gg5I>UYq_`D>`-!;`yxZrdmM7KvK z7-CwaSCl@QUY8<#BjgN6d~0iaiylcjMq4fN8IV3X2O8m9iaL6mns|>eiVljt&HtAV z5%kJK(vLtxH%4QR5_V}xk^sY6wmZGLtn_MII~hvvgE=~Og6~8D4}KEMwONV3QK7MZ zW5=+cD@V)lq#i!EA}{p~(ps`kLw!qH8ONbKW0o6QUAb&%MX;ZnS3%fF$OVj-=(Hh%lTdS zl1u+Y<(~eIr)Vc!d*j+2c*E=RZBbNO_iMq6JP~_FMm-}O6e~QPtLa}aVY%JKMEzsC z#b(V**NZ0Cl9A8XW)h0=0d>>%<{O!iEcX@zxxoZ#e^C#qs=HZo+uBOyzl#V;ZC`DH z2%i%jxS+()St|t3K6?ICb+NOvG1S7{r$}wHx3nDVE>y01xZmny-wvaNLgtghY z#Ama8$RcR?$4T2a`ukxs(Y*G-65s1M{8i4Ie|BMLI$_f^n_Kg{M%!{SENeBzy=+Q7 zXSiY^foWZQi}SQYiN11+|7=<$yrqy?0SjtycT})#M0p`_ZDuXUH9z2CNk+=xdUKo% zbli3h)-(9gwgKZd8Y`af^>$LTVWkCouDq#pU^;&>A<-}dBW?&tvF~z3^oYLjArPZU z|8Y{fBt~WUs-R)xE=GHky6t}3_iz|`FGqMfE-fK2 zy}PW>uI1SFsOR3+X8&S;)TOoTStqsKF}uf`NgD|rbA=_JG(4`|i`$lfDfHcWwf{T- z^OD?la7}lAiaZ=j*_FE^pI)%7J>H#?f3TthR8%d{zu`Cl9eWo@>xqc(FfJwl zcmseXKwbkFlhbk|_FmlygqBvyv3@tMq33&hI|~#oN54HH{+tOlbnd)U!!Bxg#a)2v zqj7y;^2_8D)eUrVQ(0xi+Wq?{;;f2)3Ua!lXM=B5*U94T{k6-R_xt)@-xdXYDtZ1HIX?{DWq;vX=bo`iYkB$5+aVV<-REmfSNa z*mapN+a1Axiv%+yu!0e|ed&``BLaholeWf3;+K&@WNBUXhpDD22-)w1Df(<6le`3x z5;pI`3D~$#6|+H~Vc6%Ayocd*paOrVK;+xo+eIlyO;7%o2vz2yk*M-1?XkEdS^%wH zSru5uX>a^6uSPR-23TjV${l&5wQjEEU(ht8fV?;G9zY+_aJJh<(_#{y1BKO%<60(O zQ5^mKNT#lokTDJV>YF&e5eha>S{u60;g}vwat>|0?7-;)Q$(08j!VjZgp+Ydzh_=M z9;Q^hzGcN7c<1WN;pF4JT@Uv}cx@(V0=MRh?>@PoB!=27+jR$eoYlKrIxkoHgiU4) zrLoAyAt$$qtY|uQ8s8#(SBx*ZN0mi%!wQvD$0zE%To^?Yj#< zy5Q*4XM&SRYC&uv-+T_D;cyTGkxYrg?RyQkl5ST5aULO#DY2rSvtS$mwMyLrb>Qa&HTmK{9Z^r{&EG0V z1PtD_nf7R5b8;DyL=&QS^P*LymmV)E!}47KRHzX471@`A8{O2@M3x|775IXhpz9=c zbDQX9CAekjZ?X=5dBe!Ej4#E^Jg_bwx~igQ*IYQ(eSM&H7!@8yd#%!$=iGs2J5GCO zBQ%CmCh-gjdkB@)(vJmikluyH<7w4tZqo~zVZ7zs*x;MLl;>OnQ|VBsVJw9-#(sHN<0weJWnfUKMCuJGk3@JNSw1;E2@T zz^aeqtHi-|S2A1SjM^uYx}sNY^u**Q5A_qF2~y1-^~yN;es&E1L#K?djW3b9vv}6T zy`5{yA9x?d+hf?+Qsm4somCVYsru9t#NQ+%m=dJY0IF^lu_Fif-Z|g%jNnD@p!9FQ zBNKpG5~J|hB}UZW{13kO@%Y8!|APLoeIx_);4uE%hvel()aK&dJFEQFLwMW}O#%52 zqH_V^JofDA>thDR-is9!74(OoXn+3eh*g_13ANcA$4<$dvdv=BE0kk+ZUKGH7XUcy zo2E^jJ*BDNGG^L 
zvtJYJZbSCd>0f)D+iMx{SF{SOzY9~p5j+OYJ!ocAjib_nL z!`w!PMoS^AjuT!dbL-M^L6NiGUKW1b=vU$HQfC~zde0nyhG`ngOPA;1{(lw4^)w6o zku?SSt>0HrgkEa^Unq?g)#;T3pUvJ#oHh zAFT_Z=#8P~mIWDukF%XA%eF6fRP9C9d(gKKN@RqEzCtE+T|paj9|XAW^@Cr&rD802 zmJPCh$Px!)v`L}pgUq4_>0}#Pu-0tvl6Zn|QmVn%LvJ`PUfW)tdAmHbg=9{eSkt;T zl@-dZFQ&Ru{)WyE6GcSvu|?reSMLe!xn6j_da<$uO8*MaUkztI&P?Z@;mm_T$g+0eJ8t-!GRX7MXvm-vhR@pK+vC8eH9}DJiu_@SAM- z^KNE8Bo9osIU1_UbiCc|_`aBGkcI~>SEIFu&Mb8mG9BA4C_QB`fAFKE0ffP>WowQh z0FA^tQoI&FI#KC?G z*li+#)3s579kfRn|H-FdPf8x4;B*yeQ<1%avX`6#nv{ocL+AzoD7_7W?fvUOcZ~!B zR}cf6cqC|c4D-L7Ca)XV)$j*fnm>1!Mrx0bCxS!eCE)h(OFTL8ITa?buImu|yB7#~@I`wn+9V{X{d8@fwH2 zrO`gupiKgoeEUA)JHF8tGj_ZDvqIpFRIloYQ?DaRp7Ww}HZ!oD2{n;31-qzKaxb7% zIF;^ak?7^+b)L|jdUH5T4+2nII)m#YTo5I$XZ9<^Ds~^HENKgyV(+hseSc4ufLe~0krpGF#UOn ztp2u#GBSC^T%O;Rd8CdZ86(!$wUt@&grZb-uIKx z3|Yb`z?-6ITd=UcepjlQZsN}u%AjVsI&FEyg>SO#kby;(6~{UUalxOe%@+)`KI${e z6{jy1thuxrN?eq(kr;OC9~_hg8n-Q=gR~O>x z%cLUG*3mLvKTCkBg#jQHt8)wA>DJ+a!(9&{a27dKj9SSgEU@5|g5RjltUhn|zk--{u4%@PQEj@KB7~AA6g%a?}OKEX~v52-QTmAa>A}Yz7{PacDV~J)HDan%yub zl^@+86z#j^kOcJ|S3?g^b$2lb8ZR6-M7gMW%c?=lhs5tR#(TIHQPT08ha(C?@UJ)9 zLtwY(Wa6V6**>_8zm<7e9`x1I-4E-17U(CJwvmH~^>bIxs$?v~s5kHl+`cm9S;|c` zy9E)TC?{|jw_fvIk*Jl%d-pYMbs$)&5X{l)w;-U|XAY5c9@*DN>S*|uYcvz=DFJLkWb zUQsHu_YogxKS5viI

zfbMGVna+tvgi9dD`z$Ut0zwMM9dgKLznpnkvLo%e^n1E0j!X}Y#7q~4;*&JOi=p#W zP@p0@O|UhJ6*)OQowpXLI-25Q-wGv5YX=GM$heL>m zEE08^+ox$1VY5HMrdEcrdNz2hx423Mi!B~qM5*BO8l+bjtm0;-<Ul*n7m2+qMPFwdTt8 z=6q5~bWxZBf%qL?chmrDxsvS5S1jV|S34@2KgOR{rFM(iCTZWTF}yca5>ey7kyHso zGZ;NwMctSM1$gslvE70E6RUN{s$Pqm<3>g)Rcb5UQ6nht+QQq!4six8k^a0NVj-6| z*n8XpKkU&f`pPtrv3RKq@4E&e7~)v3D_kQ?to@v7zgJ5xp846*bHa3J!M8E#9elkZ za-Cq4qfQ-f7{zMDj&y;>JKlm46@}Y+U7;;Iir4vvMSB+H@`UMNC>p+vk}<3he43yN zI%hI5cW0me@nUz=Q@Q3mhaZp=@qvBPi@_%siX0x*%!kXHc`@#wFykpL&wXqMtyj&T zbe~z6tRzF0@#eDwKDO#fh1E*p)Rm#NqF0V2sK)$^xK{P;^&0zDzGmB?8$_6T;N?Kf z^~r+VAY73u!fLj3caE}DPpt@CYHr9JA}1H#O;IaT{{~QLk3%23 z`X#Sb_7nfUzBe(EmY%p4V7+1$H*gHDGgqV5o-g7#t%QU`IXCNI7WnYcX8~mjhb>0T zHS&vnl9d*gLtsm#r>xMXfdeDP6RS!)5DLY6PE?^VI_16+UNYEnxpbdgjgvv3JgA8l zP6)q*2gkRpkwB&R1(o3{$H56MV}xnJ`Ox+Tl8P;Q?Hz35d!EZ;4c|VsP{E%5O) zaIOKP#@>Jg=M93xo;(i1jv*RL_CWVR5Qb59=KeDIr0L}*xRf~|qaZN40xK;`HymH{ z)c)zzd1cBk07M)#LZ_15Ok!E)#U^4)-*^m{KaPIz&CW^n`iAMx|FIcialE}?xyd?% z@mU%EUUb@%f8uTH{Ynt#x&ET~g((H)F4Ez}pPc$lt&4HnXKdz?Rfg zfh}XB^?vUj>glGL0Oin@B?*&XRDDA9PP;ap{PN-7k`{2RRaO)*+H0X?hx|y9J6%!E zR8T}fTJ-Ksm}$qgzw)oY$k-aV6kXD8IzI88+}1Bj}H*=|M>mOj^-dJ=qSH;1qU6%X{mQ6nFd zYw*yl5YAQw{&Z32Q+NZG|D(B|6>s3qHyo6Q{g9iStq}a_s1hfH1%HYW=In_7{<+OC p?%$0G!ak62{r88;bAM$fI#fOxc7`>L1rPm7|D5sJq93o_{a-?&HRJ#Q literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/README.md b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/README.md index 14823277..dc83a3c5 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/README.md +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/README.md @@ -93,8 +93,7 @@ It is recommended that you read these files in the following order: Polygraphy Test --> ```bash - python3 -m pip install extension_module/dist/polygraphy_reshape_destroyer-0.0.1-py3-none-any.whl \ - --extra-index-url https://pypi.ngc.nvidia.com + python3 -m pip install extension_module/dist/polygraphy_reshape_destroyer-0.0.1-py3-none-any.whl ``` *TIP: If you make changes to the example extension module, you can update your installed version by* diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/__init__.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/__init__.py index 27195953..fb3b870b 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/__init__.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/__init__.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/__init__.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/__init__.py
index d1c97374..f48a3bc8 100644
--- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/__init__.py
+++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/__init__.py
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/loader.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/loader.py
index 1279cb85..f7a7b6f5 100644
--- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/loader.py
+++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/loader.py
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,7 +41,7 @@ class ReplaceReshapeArgs(BaseArgs):
     # The description should answer the question: "What is this argument group responsible for?".
     #
     # - If our argument group depends on other argument groups, we must also add a `Depends on:` section
-    #   enumerating our dependencies.
+    #   listing our dependencies.
     #
     # See the `BaseArgs` docstring for more details on the expected format.
     #
@@ -70,6 +70,9 @@ def add_parser_args_impl(self):
     # Next, we'll implement parsing code for the arguments we added.
     # This will allow our argument group to be used by other argument groups.
     def parse_impl(self, args):
+        # The docstring for `parse_impl` must document which attributes it populates.
+        # These attributes are considered part of the public interface of the argument group
+        # and may be used by other argument groups and/or command-line tools.
         """
         Parses command-line arguments and populates the following attributes:
 
diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/runner.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/runner.py
index f54cb70d..1de79035 100644
--- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/runner.py
+++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/args/runner.py
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/__init__.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/__init__.py index 0f0b4db9..5e210e27 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/__init__.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/__init__.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/loader.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/loader.py index 93e97d8c..13d8667a 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/loader.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,8 +27,8 @@ from polygraphy.backend.base import BaseLoader from polygraphy.logger import G_LOGGER -# For external dependencies besides `polygraphy` or any Polygraphy backends -# (any backend besides `polygraphy.backend.base`), you should use `mod.lazy_import`. +# For external dependencies or any Polygraphy backends +# (besides `polygraphy.backend.base`), you should use `mod.lazy_import`. # # This will enable Polygraphy to automatically install dependencies at runtime if required, and # will avoid creating a hard dependency on external packages. @@ -84,6 +84,7 @@ def __init__(self, graph: Union[gs.Graph, Callable[[], gs.Graph]], rename_nodes: self.rename_nodes = util.default(rename_nodes, False) # The `call_impl` method is responsible for doing the actual work of the loader. + @util.check_called_by("__call__") def call_impl(self): """ Returns: diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/runner.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/runner.py index be7acdd0..b5c2ed75 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/runner.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/backend/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
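A minimal sketch of the lazy-import pattern recommended above for extension backends; the module names here are only illustrative:

```python
# Sketch only: declare optional dependencies lazily so they are imported (and, if
# POLYGRAPHY_AUTOINSTALL_DEPS is enabled, auto-installed) on first use rather than at import time.
from polygraphy import mod

gs = mod.lazy_import("onnx_graphsurgeon")
onnx = mod.lazy_import("onnx>=1.8.1")  # a version constraint may be embedded in the string


def num_nodes(path):
    # The first attribute access here triggers the actual imports.
    return len(gs.import_onnx(onnx.load(path)).nodes)
```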
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -61,6 +61,7 @@ def __init__(self, graph, name=None, speed: str = None): # Like Polygraphy, extension modules should use `G_LOGGER.critical()` for any unrecoverable errors. G_LOGGER.critical(f"Invalid speed: {self.speed}. Note: Valid speeds are: {VALID_SPEEDS}") + @util.check_called_by("activate") def activate_impl(self): # As with the loader, the `graph` argument could be either a `gs.Graph` or a callable that # returns one, such as a loader, so we try to call it. @@ -71,6 +72,7 @@ def activate_impl(self): # so we can assume that `self.graph` will be available. # + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): # Input metadata is used by Polygraphy's default data loader to determine the required # shapes and datatypes of the input buffers. @@ -79,6 +81,7 @@ def get_input_metadata_impl(self): meta.add(tensor.name, tensor.dtype, tensor.shape) return meta + @util.check_called_by("infer") def infer_impl(self, feed_dict): start = time.time() @@ -115,5 +118,6 @@ def infer_impl(self, feed_dict): self.inference_time = end - start return outputs + @util.check_called_by("deactivate") def deactivate_impl(self): del self.graph diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/export.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/export.py index 505d0adf..ec3b0349 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/export.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/polygraphy_reshape_destroyer/export.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/setup.py b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/setup.py index 57fa9650..a8290580 100644 --- a/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/setup.py +++ b/tools/Polygraphy/examples/dev/02_extending_polygraphy_run/extension_module/setup.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/__init__.py b/tools/Polygraphy/polygraphy/__init__.py index 8ad6217d..86602aee 100644 --- a/tools/Polygraphy/polygraphy/__init__.py +++ b/tools/Polygraphy/polygraphy/__init__.py @@ -1,3 +1,3 @@ import polygraphy.config -__version__ = "0.45.0" +__version__ = "0.47.1" diff --git a/tools/Polygraphy/polygraphy/backend/base/loader.py b/tools/Polygraphy/polygraphy/backend/base/loader.py index b1ca922e..481cbfe1 100644 --- a/tools/Polygraphy/polygraphy/backend/base/loader.py +++ b/tools/Polygraphy/polygraphy/backend/base/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
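For reference, a hypothetical runner following the same `*_impl()` contract shown above; `BaseRunner`'s public `activate()` / `infer()` / `deactivate()` methods are the ones that invoke these implementations:

```python
# Hypothetical pass-through runner, for illustration only.
import time

from polygraphy.backend.base import BaseRunner
from polygraphy.common import TensorMetadata


class IdentityRunner(BaseRunner):
    def __init__(self, name=None):
        super().__init__(name=name, prefix="identity-runner")

    def get_input_metadata_impl(self):
        meta = TensorMetadata()
        meta.add("x", dtype=None, shape=(-1,))  # a single 1D input of any length
        return meta

    def infer_impl(self, feed_dict):
        start = time.time()
        outputs = {"y": feed_dict["x"]}  # "inference" just echoes the input
        self.inference_time = time.time() - start
        return outputs
```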
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,11 +24,9 @@ class BaseLoader: Base class for Polygraphy Loaders. """ + # Implementation for ``__call__``. Derived classes should implement this + # method rather than ``__call__``. def call_impl(self, *args, **kwargs): - """ - Implementation for ``__call__``. Derived classes should implement this - method rather than ``__call__``. - """ raise NotImplementedError("BaseLoader is an abstract class") @func.constantmethod diff --git a/tools/Polygraphy/polygraphy/backend/base/runner.py b/tools/Polygraphy/polygraphy/backend/base/runner.py index d2a37659..0a26ec66 100644 --- a/tools/Polygraphy/polygraphy/backend/base/runner.py +++ b/tools/Polygraphy/polygraphy/backend/base/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -112,6 +112,9 @@ def get_input_metadata(self): Returns: TensorMetadata: Input names, shapes, and data types. """ + if not self.is_active: + G_LOGGER.critical(f"{self.name:35} | Must be activated prior to calling get_input_metadata()") + return self.get_input_metadata_impl() # Implementation for runner inference. Derived classes should override this function @@ -155,7 +158,7 @@ def infer(self, feed_dict, check_inputs=True, *args, **kwargs): if check_inputs: input_metadata = self.get_input_metadata() - G_LOGGER.verbose(f"Runner input metadata is: {input_metadata}") + G_LOGGER.verbose(f"{self.name:35} | Input metadata is: {input_metadata}", mode=LogMode.ONCE) util.check_sequence_contains(feed_dict.keys(), input_metadata.keys(), name="feed_dict", items_name="inputs") diff --git a/tools/Polygraphy/polygraphy/backend/common/loader.py b/tools/Polygraphy/polygraphy/backend/common/loader.py index 61910fad..38894bf6 100644 --- a/tools/Polygraphy/polygraphy/backend/common/loader.py +++ b/tools/Polygraphy/polygraphy/backend/common/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,6 +33,7 @@ def __init__(self, path): """ self._path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -59,6 +60,7 @@ def __init__(self, obj, path): self._bytes = obj self._path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -89,6 +91,7 @@ def __init__(self, path, name): self._path = path self._name = name + @util.check_called_by("__call__") def call_impl(self, *args, **kwargs): """ Returns: diff --git a/tools/Polygraphy/polygraphy/backend/onnx/loader.py b/tools/Polygraphy/polygraphy/backend/onnx/loader.py index 78e8eb4c..4b144f7d 100644 --- a/tools/Polygraphy/polygraphy/backend/onnx/loader.py +++ b/tools/Polygraphy/polygraphy/backend/onnx/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,6 +24,7 @@ from polygraphy.backend.onnx import util as onnx_util from polygraphy.logger import G_LOGGER, LogMode +np = mod.lazy_import("numpy") onnx = mod.lazy_import("onnx>=1.8.1") onnxrt = mod.lazy_import("onnxruntime>=1.10.0") onnxmltools = mod.lazy_import("onnxmltools==1.11.1", requires=["onnxconverter_common==1.12.2"]) @@ -107,6 +108,7 @@ def __init__(self, model): """ self._model = model + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -141,6 +143,7 @@ def __init__(self, path, external_data_dir=None, ignore_external_data=None): self.external_data_dir = external_data_dir self.ignore_external_data = util.default(ignore_external_data, False) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -188,6 +191,7 @@ def __init__(self, graph, opset=None, optimize=None): self.opset = util.default(opset, 11) self.optimize = util.default(optimize, True) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -238,6 +242,7 @@ def __init__(self, model, outputs=None, exclude_outputs=None, copy=None): self.outputs = outputs self.exclude_outputs = exclude_outputs + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -275,6 +280,7 @@ def __init__(self, model, copy=None): """ super().__init__(model, copy) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -368,6 +374,7 @@ def __init__( self.size_threshold = size_threshold self.allow_onnxruntime_shape_inference = allow_onnxruntime_shape_inference + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -425,6 +432,108 @@ def run_const_fold_pass(model): return model +@mod.export(funcify=True) +class SetUpperBound(BaseLoadOnnxCopy): + """ + Functor that sets upper bounds for tensors with unbounded DDS in an ONNX model. + + Requires that the model has been constant folded and has shapes inferred. + """ + + def __init__( + self, + model, + upper_bounds, + copy=None, + ): + """ + Set upper bounds for tensors with unbounded DDS in an ONNX model. + + Args: + model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): + An ONNX model or a callable that returns one. + + upper_bounds (Union[int, Dict[str, int]]): + The upper bounds for tensors with unbounded DDS. + If a single integer is provided, it will be used as the default upper bound for all tensors with unbounded DDS. + This can also be provided on a per-tensor basis using a dictionary. In that case, use an empty string ("") as the + key to specify default upper bound for tensors not explicitly listed. + copy (bool): + Whether to create a copy of the model first. + Defaults to False. + """ + super().__init__(model, copy) + self.upper_bounds = upper_bounds + + def call_impl(self): + """ + Returns: + onnx.ModelProto: The new ONNX model. + """ + + # Set upper bounds for tensors with unbounded DDS in the onnx model. + def set_upper_bound(graph, target_tensor_list): + applied_bounds = {} + for tensor in target_tensor_list: + upper_bound = util.value_or_from_dict(self.upper_bounds, tensor.name) + if upper_bound is None: + continue + # Insert a min operator to set the upper bound for the target tensor. + # A target tensor should always be produced from a single node. 
+ assert (len(tensor.inputs) == 1) + producer = tensor.inputs[0] + producer_idx = producer.outputs.index(tensor) + tensor_copy = gs.Variable( + tensor.name + "_copy", dtype=tensor.dtype, shape=tensor.shape) + upper_bound_values = np.array(upper_bound) + if tensor.shape is not None and len(tensor.shape) > 0: + upper_bound_values = np.array([upper_bound] * len(tensor.shape)) + tensor_upper_bound = gs.Constant( + tensor.name + "_upper_bound", values=upper_bound_values) + min_node = gs.Node(op="Min", inputs=[ + tensor_copy, tensor_upper_bound], outputs=[tensor]) + producer.outputs[producer_idx] = tensor_copy + tensor.inputs = [min_node] + graph.nodes.append(min_node) + applied_bounds[tensor.name] = upper_bound + G_LOGGER.info( + f"Set tensor upper bounds: {applied_bounds}") + return graph + + model = self.load() + graph = gs_from_onnx(model) + + target_tensor_list = onnx_util.get_unbounded_dds_tensors(graph) + + tensor_map = graph.tensors() + target_names = {tensor.name for tensor in target_tensor_list} + if isinstance(self.upper_bounds, dict): + input_names = set(self.upper_bounds.keys()) - {""} + # Report error when input tensor name is not in the graph. + util.check_sequence_contains( + set(tensor_map.keys()), + input_names, + name="the upper bounds dictionary", + items_name="tensors", + check_extra=False, + ) + # Report warning when input tensor is not a unbounded DDS tensor. + util.check_sequence_contains( + set(target_names), + input_names, + name="the upper bounds dictionary", + items_name="tensors", + log_func=G_LOGGER.warning, + check_extra=False, + ) + # Still set upper bound for input tensors with bounded shapes. + target_names.update(input_names) + graph = set_upper_bound(graph, [tensor_map[name] for name in target_names]) + model = gs.export_onnx(graph.cleanup(), do_type_check=False) + + return model + + @mod.export(funcify=True) class InferShapes(BaseLoader): """ @@ -514,6 +623,7 @@ def _run_onnxruntime_shape_inference(self, model, external_data_dir): model = onnx_from_path(model, external_data_dir=external_data_dir) return onnxrt_symbolic_shape_inference.SymbolicShapeInference.infer_shapes(model, auto_merge=True) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -524,26 +634,29 @@ def call_impl(self): G_LOGGER.verbose("Starting shape inference") - mod.autoinstall(onnxrt_symbolic_shape_inference) try: - if self.allow_onnxruntime and mod.has_mod("onnxruntime.tools.symbolic_shape_infer"): - G_LOGGER.info( - "Inferring shapes in the model with `onnxruntime.tools.symbolic_shape_infer`.\n" - "Note: To force Polygraphy to use `onnx.shape_inference` instead, set `allow_onnxruntime=False` or " - "use the `--no-onnxruntime-shape-inference` command-line option.", - mode=LogMode.ONCE, - ) - - model = self._run_onnxruntime_shape_inference(model, external_data_dir) - else: - if self.allow_onnxruntime: + use_onnx_shape_inference = not self.allow_onnxruntime + if self.allow_onnxruntime: + try: + model = self._run_onnxruntime_shape_inference(model, external_data_dir) + G_LOGGER.verbose( + "Inferred shapes in the model with `onnxruntime.tools.symbolic_shape_infer`.\n" + "Note: To force Polygraphy to use `onnx.shape_inference` instead, set `allow_onnxruntime=False` or " + "use the `--no-onnxruntime-shape-inference` command-line option.", + mode=LogMode.ONCE, + ) + except: + use_onnx_shape_inference = True G_LOGGER.warning( - "Falling back to `onnx.shape_inference` because `onnxruntime.tools.symbolic_shape_infer` could not be loaded.\n" + "Falling back to 
`onnx.shape_inference` because `onnxruntime.tools.symbolic_shape_infer` either could not be loaded " + "or did not run successfully.\n" "Note that using ONNX-Runtime for shape inference may be faster and require less memory.\n" - "Consider installing ONNX-Runtime or settting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your environment " + "Consider installing ONNX-Runtime or setting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your environment " "variables to allow Polygraphy to do so automatically.", mode=LogMode.ONCE, ) + + if use_onnx_shape_inference: model = self._run_onnx_shape_inference(model, external_data_dir) except Exception as err: if not self.error_ok: @@ -591,6 +704,7 @@ def __init__(self, model, input_metadata=None, output_metadata=None, check_meta= self.output_metadata = output_metadata self.check_meta = util.default(check_meta, True) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -694,6 +808,7 @@ def __init__(self, model, path, external_data_path=None, size_threshold=None, al self.size_threshold = size_threshold self.all_tensors_to_one_file = all_tensors_to_one_file + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -753,6 +868,7 @@ def __init__(self, model): """ self._model = model + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -778,6 +894,7 @@ def __init__(self, serialized_onnx): """ self._serialized_onnx = serialized_onnx + @util.check_called_by("__call__") def call_impl(self): """ Returns: diff --git a/tools/Polygraphy/polygraphy/backend/onnx/util.py b/tools/Polygraphy/polygraphy/backend/onnx/util.py index 6b9c2995..ba2546b6 100644 --- a/tools/Polygraphy/polygraphy/backend/onnx/util.py +++ b/tools/Polygraphy/polygraphy/backend/onnx/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -350,3 +350,83 @@ def lower_constant_nodes(graph): for node_id in sorted(remove_nodes, reverse=True): del graph.nodes[node_id] return graph + + +def get_unbounded_dds_tensors(graph): + graph.toposort() + # A dict of operators that might produce a output tensor with unbounded DDS, when the value of the input tensor + # at the corresponding index is a runtime value. For example, "Range" => "1" means that if the input 1 of the Range + # operator is a runtime value, e.g. not a const tensor or an initializer, then the Range output tensor size is unbounded. + dispatcher_dict = { + "Range": [1], # the limit input of the Range operator + "Pad": [1], # the pads input of the Pad operator + "Resize": [3], # the sizes input of the Resize operator + "Tile": [1], # the repeats input of the Tile operator + "Expand": [1], # the shape input of the Expand operator + } + + # Check if the given operator produces a output tensor with unbounded DDS. + def check_op(node, const_tensor_set): + # Check if the operator is inside the dispatcher dict. + if node.op in dispatcher_dict: + input_idx_list = dispatcher_dict[node.op] + for input_idx in input_idx_list: + if input_idx < len(node.inputs): + input_tensor = node.inputs[input_idx] + # Check if the corresponding input tensor is a runtime value and its producer is not Min operator. + # If a tensor is produced by a Min operator, its upper bound has already been set. 
+                    if input_tensor.name not in const_tensor_set and len(input_tensor.inputs) >= 1 and input_tensor.inputs[0].op != 'Min':
+                        return input_tensor
+        return None
+
+    # Find all constant tensors.
+    def get_const_tensors(graph):
+        return {tensor.name for tensor in graph.tensors().values() if isinstance(tensor, gs.Constant)}
+
+    # Find all dynamic shape symbols; users will set upper bounds for these symbols when building the model in TensorRT.
+    def get_dynamic_shapes(graph):
+        dynamic_shape_set = set()
+        for tensor in graph.inputs:
+            for shape in tensor.shape:
+                if isinstance(shape, str):
+                    dynamic_shape_set.add(shape)
+        return dynamic_shape_set
+
+    # Find all tensors with unbounded DDS.
+    def get_target_tensors(graph):
+        # Find dynamic shapes; these shapes should have upper bounds in TensorRT.
+        dynamic_shape_set = get_dynamic_shapes(graph)
+
+        # Find const tensors. For those operators in the dispatch dict, constant inputs will not introduce outputs with unbounded DDS.
+        const_tensor_set = get_const_tensors(graph)
+
+        # Our target is to find the input tensors that cause their consumer nodes to generate unbounded outputs.
+        # If a tensor has named dimensions that appeared before in its symbolic shape, it means that the shape is *not* data dependent,
+        # and so will have an upper bound.
+        target_tensor_names = set()
+        target_tensor_list = []
+        for node in graph.nodes:
+            check_node = False
+            # Check if the node's output contains a newly introduced dynamic shape.
+            for tensor in node.outputs:
+                # Always check nodes if tensor.shape is None.
+                # This happens when symbolic shape inference does not work correctly due to some restrictions.
+                if tensor.shape is None:
+                    check_node = True
+                else:
+                    for shape in tensor.shape:
+                        # If a shape is a dynamic shape, then it is a str.
+                        # Only check the node that first introduced the dynamic shape.
+                        if isinstance(shape, str) and shape not in dynamic_shape_set:
+                            dynamic_shape_set.add(shape)
+                            check_node = True
+            # Check if the node will generate an unbounded output size.
+            if check_node:
+                target_tensor = check_op(node, const_tensor_set)
+                # Avoid duplication.
+                if target_tensor is not None and target_tensor.name not in target_tensor_names:
+                    target_tensor_names.add(target_tensor.name)
+                    target_tensor_list.append(target_tensor)
+        return target_tensor_list
+
+    return get_target_tensors(graph)
diff --git a/tools/Polygraphy/polygraphy/backend/onnxrt/loader.py b/tools/Polygraphy/polygraphy/backend/onnxrt/loader.py
index 8583260c..b2eb4ab7 100644
--- a/tools/Polygraphy/polygraphy/backend/onnxrt/loader.py
+++ b/tools/Polygraphy/polygraphy/backend/onnxrt/loader.py
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
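A sketch of the new upper-bound workflow from the Python API, assuming the funcified `set_upper_bound` is exported alongside `SetUpperBound` like the other ONNX loaders; the model path and tensor name are placeholders:

```python
from polygraphy.backend.onnx import fold_constants, infer_shapes, onnx_from_path, set_upper_bound

model = onnx_from_path("model.onnx")
model = fold_constants(model)  # SetUpperBound expects a constant-folded model...
model = infer_shapes(model)    # ...with shapes inferred.

# Cap all unbounded DDS tensors at 1000 elements by default, but cap "lengths" at 128.
model = set_upper_bound(model, upper_bounds={"": 1000, "lengths": 128})
```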
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,6 +46,7 @@ def __init__(self, model_bytes, providers=None): self._model_bytes_or_path = model_bytes self.providers = util.default(providers, ["cpu"]) + @util.check_called_by("__call__") def call_impl(self): """ Returns: diff --git a/tools/Polygraphy/polygraphy/backend/onnxrt/runner.py b/tools/Polygraphy/polygraphy/backend/onnxrt/runner.py index 77314e68..fcb552a1 100644 --- a/tools/Polygraphy/polygraphy/backend/onnxrt/runner.py +++ b/tools/Polygraphy/polygraphy/backend/onnxrt/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,9 +39,11 @@ def __init__(self, sess, name=None): super().__init__(name=name, prefix="onnxrt-runner") self._sess = sess + @util.check_called_by("activate") def activate_impl(self): self.sess, _ = util.invoke_if_callable(self._sess) + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): ONNX_RT_TYPE_TO_NP = { "tensor(double)": np.float64, @@ -65,6 +67,7 @@ def get_input_metadata_impl(self): meta.add(node.name, dtype=dtype, shape=node.shape) return meta + @util.check_called_by("infer") def infer_impl(self, feed_dict): start = time.time() inference_outputs = self.sess.run(None, feed_dict) @@ -76,5 +79,6 @@ def infer_impl(self, feed_dict): self.inference_time = end - start return out_dict + @util.check_called_by("deactivate") def deactivate_impl(self): del self.sess diff --git a/tools/Polygraphy/polygraphy/backend/pluginref/references.py b/tools/Polygraphy/polygraphy/backend/pluginref/references.py index ffef18fb..2a50006c 100644 --- a/tools/Polygraphy/polygraphy/backend/pluginref/references.py +++ b/tools/Polygraphy/polygraphy/backend/pluginref/references.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/backend/pluginref/runner.py b/tools/Polygraphy/polygraphy/backend/pluginref/runner.py index 0c2a4b33..150bc38d 100644 --- a/tools/Polygraphy/polygraphy/backend/pluginref/runner.py +++ b/tools/Polygraphy/polygraphy/backend/pluginref/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,12 +46,15 @@ def __init__(self, graph, name=None): super().__init__(name=name, prefix="pluginref-runner") self._graph = graph + @util.check_called_by("activate") def activate_impl(self): self.graph, _ = util.invoke_if_callable(self._graph) + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): return onnx_util.meta_from_gs_tensors(self.graph.inputs) + @util.check_called_by("infer") def infer_impl(self, feed_dict): start = time.time() @@ -71,5 +74,6 @@ def infer_impl(self, feed_dict): self.inference_time = end - start return outputs + @util.check_called_by("deactivate") def deactivate_impl(self): del self.graph diff --git a/tools/Polygraphy/polygraphy/backend/pyt/runner.py b/tools/Polygraphy/polygraphy/backend/pyt/runner.py index 3f8e8b2d..450d46f7 100644 --- a/tools/Polygraphy/polygraphy/backend/pyt/runner.py +++ b/tools/Polygraphy/polygraphy/backend/pyt/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,13 +49,16 @@ def __init__(self, model, input_metadata, output_names, name=None): self.input_metadata = input_metadata self.output_names = output_names + @util.check_called_by("activate") def activate_impl(self): self.model, _ = util.invoke_if_callable(self._model) self.model.eval() + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): return self.input_metadata + @util.check_called_by("infer") def infer_impl(self, feed_dict): with torch.no_grad(): inputs = [ @@ -71,5 +74,6 @@ def infer_impl(self, feed_dict): out_dict[name] = output.cpu().numpy() return out_dict, end - start + @util.check_called_by("deactivate") def deactivate_impl(self): del self.model diff --git a/tools/Polygraphy/polygraphy/backend/tf/loader.py b/tools/Polygraphy/polygraphy/backend/tf/loader.py index b71bb88d..3b86dcab 100644 --- a/tools/Polygraphy/polygraphy/backend/tf/loader.py +++ b/tools/Polygraphy/polygraphy/backend/tf/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -66,6 +66,7 @@ def constfold(self, graphdef, output_names): session_config.graph_options.resave_options.CopyFrom(rewriter_config) return tf_optimizer.OptimizeGraph(session_config, metagraph, graph_id=b"graph") + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -128,6 +129,7 @@ def __init__(self, path): """ self.path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -158,6 +160,7 @@ def __init__(self, path): """ self.path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -189,6 +192,7 @@ def __init__(self, dir, name=None): self.dir = dir self.name = name + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -257,6 +261,7 @@ def __init__( self.is_dynamic_op = is_dynamic_op self.minimum_segment_size = util.default(minimum_segment_size, 3) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -319,6 +324,7 @@ def __init__(self, graph, outputs=None): self._graph = graph self.outputs = outputs + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -358,6 +364,7 @@ def __init__(self, graph, path=None, tensorboard_dir=None, engine_dir=None): self.tensorboard_dir = tensorboard_dir self.engine_dir = engine_dir + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -406,6 +413,7 @@ def __init__(self, gpu_memory_fraction=None, allow_growth=None, use_xla=None): self.allow_growth = util.default(allow_growth, False) self.use_xla = util.default(use_xla, False) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -444,6 +452,7 @@ def __init__(self, graph, config=None): self.graph = graph self.config = util.default(config, CreateConfig()) + @util.check_called_by("__call__") def call_impl(self): """ Returns: diff --git a/tools/Polygraphy/polygraphy/backend/tf/runner.py b/tools/Polygraphy/polygraphy/backend/tf/runner.py index c62b4352..12099855 100644 --- a/tools/Polygraphy/polygraphy/backend/tf/runner.py +++ b/tools/Polygraphy/polygraphy/backend/tf/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -61,12 +61,15 @@ def __init__(self, sess, timeline_dir=None, name=None): self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) self.run_metadata = tf.RunMetadata() + @util.check_called_by("activate") def activate_impl(self): (self.sess, self.output_names), _ = util.invoke_if_callable(self._sess) + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): return tf_util.get_input_metadata(self.sess.graph) + @util.check_called_by("infer") def infer_impl(self, feed_dict): G_LOGGER.extra_verbose(f"Received feed_dict: {feed_dict}") start = time.time() @@ -94,6 +97,7 @@ def infer_impl(self, feed_dict): return out_dict + @util.check_called_by("deactivate") def deactivate_impl(self): self.sess.close() del (self.sess, self.output_names) diff --git a/tools/Polygraphy/polygraphy/backend/tf/util.py b/tools/Polygraphy/polygraphy/backend/tf/util.py index 7849cdea..ce8709cc 100644 --- a/tools/Polygraphy/polygraphy/backend/tf/util.py +++ b/tools/Polygraphy/polygraphy/backend/tf/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/backend/trt/algorithm_selector.py b/tools/Polygraphy/polygraphy/backend/trt/algorithm_selector.py index 3a4d3aae..e2ad25e3 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/algorithm_selector.py +++ b/tools/Polygraphy/polygraphy/backend/trt/algorithm_selector.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,8 @@ from polygraphy.json import Decoder, Encoder, add_json_methods from polygraphy.logger import G_LOGGER, LogMode +from typing import Sequence + trt = mod.lazy_import("tensorrt") @@ -28,14 +30,102 @@ ## Data Structures ## +# # NOTE: Modifying the structure of the data classes below will break backwards compatiblity +# + + +def check_is_instance(obj, cls, name): + if not isinstance(obj, cls): + G_LOGGER.critical(f"'{name}' must be an instance of {cls.__name__}, but is: {obj}.") + + +@mod.export() +class TensorInfo: + """ + Tracks information about a tensor, such as format and data type. + """ + + @staticmethod + def from_trt(io_info): + """ + Creates a Polygraphy ``TensorInfo`` instance from a TensorRT ``IAlgorithmIOInfo``. + + Args: + io_info (trt.IAlgorithmIOInfo): The algorithm I/O information. + + Returns: + TensorInfo + """ + return TensorInfo( + io_info.tensor_format, + io_info.dtype, + tuple(io_info.strides), + # These fields were added in 8.6 + util.try_getattr(io_info, "vectorized_dim"), + util.try_getattr(io_info, "components_per_element"), + ) + + def __init__(self, tensor_format, dtype, strides, vectorized_dim, components_per_element): + """ + Args: + tensor_format (trt.TensorFormat): The tensor format. + dtype (trt.DataType): The data type. + strides (Sequence[int]): The strides. + vectorized_dim (int): The index of the vectorized dimensions. 
+ components_per_element (int): The number of components per element. + """ + check_is_instance(tensor_format, trt.TensorFormat, "tensor_format") + check_is_instance(dtype, trt.DataType, "dtype") + check_is_instance(strides, Sequence, "strides") + if vectorized_dim is not None: + check_is_instance(vectorized_dim, int, "vectorized_dim") + if components_per_element is not None: + check_is_instance(components_per_element, int, "components_per_element") + + self.tensor_format = tensor_format + self.dtype = dtype + self.strides = tuple(strides) + self.vectorized_dim = vectorized_dim + self.components_per_element = components_per_element + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __repr__(self): + return f"TensorInfo({str(self.tensor_format)}, {str(self.dtype)}, {self.strides}, {self.vectorized_dim}, {self.components_per_element})" + + def __hash__(self): + return hash((self.tensor_format, self.dtype, self.strides, self.vectorized_dim, self.components_per_element)) + + +@Encoder.register(TensorInfo) +def encode(tensor_info): + return { + "tensor_format": str(tensor_info.tensor_format), + "dtype": str(tensor_info.dtype), + "strides": tensor_info.strides, + "vectorized_dim": tensor_info.vectorized_dim, + "components_per_element": tensor_info.components_per_element, + } + + +@Decoder.register(TensorInfo) +def decode(dct): + return TensorInfo( + util.getattr_nested(trt, dct["tensor_format"]), + util.getattr_nested(trt, dct["dtype"]), + dct["strides"], + dct["vectorized_dim"], + dct["components_per_element"], + ) @mod.export() class Algorithm: """ Represents a TensorRT algorithm variant, which can be uniquely represented - by an implementation ID and tactic ID. + by an implementation ID, tactic ID, and I/O tensor information. """ @staticmethod @@ -49,16 +139,16 @@ def from_trt(context, algorithm): The algorithm context corresponding to the layer. algorithm (trt.IAlgorithm): The algorithm variant provided by TensorRT. - """ - def unpack_io_info(io_info): - return (io_info.tensor_format, io_info.dtype, tuple(io_info.strides)) + Returns: + Algorithm + """ implementation = algorithm.algorithm_variant.implementation tactic = algorithm.algorithm_variant.tactic - inputs = tuple(unpack_io_info(algorithm.get_algorithm_io_info(i)) for i in range(context.num_inputs)) + inputs = tuple(TensorInfo.from_trt(algorithm.get_algorithm_io_info(i)) for i in range(context.num_inputs)) outputs = tuple( - unpack_io_info(algorithm.get_algorithm_io_info(i)) + TensorInfo.from_trt(algorithm.get_algorithm_io_info(i)) for i in range(context.num_inputs, context.num_inputs + context.num_outputs) ) return Algorithm(implementation, tactic, inputs, outputs) @@ -70,54 +160,30 @@ def __init__(self, implementation, tactic, inputs, outputs): The implementation for this Algorithm. tactic (int): The tactic for this Algorithm. - inputs (List[Tuple[trt.TensorFormat, trt.DataType, Sequence[int]]]): - A list of tuples containg a TensorRT tensor format, data type, and strides for each input. - outputs (List[Tuple[trt.TensorFormat, trt.DataType, Sequence[int]]]): - A list of tuples containg a TensorRT tensor format, data type, and strides for each output. + inputs (Sequence[TensorInfo]): + A sequence of TensorInfos for each input. + outputs (Sequence[TensorInfo]): + A sequence of TensorInfos for each output. """ - - def validate_meta(meta): - for index, tup in enumerate(meta): - # Fill in empty tuples for missing strides. 
- if len(tup) == 2: - fmt, dtype = tup - strides = tuple() - tup = (fmt, dtype, strides) - meta[index] = tup - - fmt, dtype, strides = tup - - if not isinstance(fmt, trt.TensorFormat): - G_LOGGER.critical( - f"'format' must be an instance of trt.TensorFormat, but is: {fmt}.\nNote: Provided input/output metadata was: {meta}" - ) - if not isinstance(dtype, trt.DataType): - G_LOGGER.critical( - f"'dtype' must be an instance of trt.DataType, but is: {dtype}.\nNote: Provided input/output metadata was: {meta}" - ) - - if not isinstance(strides, tuple): - G_LOGGER.critical( - f"'strides' must be a tuple, but is: {strides}.\nNote: Provided input/output metadata was: {meta}" - ) - return meta - self.implementation = implementation self.tactic = tactic + + def check_io(lst, name): + for index, io in enumerate(lst): + check_is_instance(io, TensorInfo, f"{name}[{index}]") + + check_io(inputs, "inputs") + check_io(outputs, "outputs") + # Use tuples here so the class is hashable. - self.inputs = tuple(validate_meta(inputs)) - self.outputs = tuple(validate_meta(outputs)) + self.inputs = tuple(inputs) + self.outputs = tuple(outputs) def __str__(self): - def io_str(io): - return tuple((str(tensor_format), str(dtype), str(strides)) for tensor_format, dtype, strides in io) - - return f"(Implementation: {self.implementation}, Tactic: {self.tactic}) | Inputs: {io_str(self.inputs)} | Outputs: {io_str(self.outputs)}" + return f"(Implementation: {self.implementation}, Tactic: {self.tactic}) | Inputs: {self.inputs} | Outputs: {self.outputs}" def __eq__(self, other): - tactic_matches = self.implementation == other.implementation and self.tactic == other.tactic - io_matches = self.inputs == other.inputs and self.outputs == other.outputs - return tactic_matches and io_matches + return self.__dict__ == other.__dict__ def __hash__(self): return hash((self.implementation, self.tactic, self.inputs, self.outputs)) @@ -125,37 +191,21 @@ def __hash__(self): @Encoder.register(Algorithm) def encode(algo): - def encode_algo_io(io_list): - encoded = [] - for fmt, dtype, strides in io_list: - encoded.append((str(fmt), str(dtype), strides)) - return encoded - return { "implementation": algo.implementation, "tactic": algo.tactic, - "inputs": encode_algo_io(algo.inputs), - "outputs": encode_algo_io(algo.outputs), + "inputs": algo.inputs, + "outputs": algo.outputs, } @Decoder.register(Algorithm) def decode(dct): - def decode_algo_io(io_list): - decoded = [] - for tup in io_list: - fmt, dtype, strides = util.unpack_args(tup, 3) - entry = [util.getattr_nested(trt, fmt), util.getattr_nested(trt, dtype)] - if strides is not None: - entry.append(tuple(strides)) - decoded.append(tuple(entry)) - return decoded - return Algorithm( implementation=dct["implementation"], tactic=dct["tactic"], - inputs=decode_algo_io(dct["inputs"]), - outputs=decode_algo_io(dct["outputs"]), + inputs=dct["inputs"], + outputs=dct["outputs"], ) diff --git a/tools/Polygraphy/polygraphy/backend/trt/calibrator.py b/tools/Polygraphy/polygraphy/backend/trt/calibrator.py index c0bb10f6..8a9b2ec5 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/calibrator.py +++ b/tools/Polygraphy/polygraphy/backend/trt/calibrator.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
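A sketch of constructing the new I/O-aware `Algorithm` representation by hand, e.g. for a tactic replay; this assumes `TensorInfo` and `Algorithm` are re-exported from `polygraphy.backend.trt` and that TensorRT is installed:

```python
import tensorrt as trt

from polygraphy.backend.trt import Algorithm, TensorInfo

io = TensorInfo(
    trt.TensorFormat.LINEAR,
    trt.DataType.FLOAT,
    strides=(16, 4, 1),
    vectorized_dim=-1,  # -1 => not vectorized
    components_per_element=1,
)
algo = Algorithm(implementation=0x1234, tactic=0x0, inputs=[io], outputs=[io])
print(algo)
```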
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/backend/trt/config.py b/tools/Polygraphy/polygraphy/backend/trt/config.py index 55586b0d..a0312c97 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/config.py +++ b/tools/Polygraphy/polygraphy/backend/trt/config.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,6 +58,9 @@ def __init__( builder_optimization_level=None, fp8=None, hardware_compatibility_level=None, + max_aux_streams=None, + version_compatible=None, + exclude_lean_runtime=None, ): """ Creates a TensorRT IBuilderConfig that can be used by EngineFromNetwork. @@ -160,9 +163,20 @@ def __init__( Whether to build the engine with FP8 precision enabled. Defaults to False. hardware_compatibility_level (trt.HardwareCompatibilityLevel): - The hardware compatibiliity level. This allows engines built on one GPU architecture to work on GPUs + The hardware compatibility level. This allows engines built on one GPU architecture to work on GPUs of other architectures. Defaults to TensorRT's default hardware compatibility level. + max_aux_streams (int): + The maximum number of auxiliary streams that TensorRT is allowed to use. If the network contains + operators that can run in parallel, TRT can execute them using auxiliary streams in addition to the + one provided to the IExecutionContext::enqueueV3() call. + The default maximum number of auxiliary streams is determined by the heuristics in TensorRT on + whether enabling multi-stream would improve the performance. + version_compatible (bool): + Whether to build an engine that is version compatible. + exclude_lean_runtime (bool): + Whether to exclude the lean runtime in version compatible engines. + Requires that version compatibility is enabled. """ self.max_workspace_size = max_workspace_size if max_workspace_size is not None: @@ -191,6 +205,9 @@ def __init__( self.direct_io = util.default(direct_io, False) self.builder_optimization_level = builder_optimization_level self.hardware_compatibility_level = hardware_compatibility_level + self.max_aux_streams = max_aux_streams + self.version_compatible = version_compatible + self.exclude_lean_runtime = exclude_lean_runtime if self.calibrator is not None and not self.int8: G_LOGGER.warning( @@ -198,6 +215,7 @@ def __init__( "Did you mean to set `int8=True` to enable building with int8 precision?" ) + @util.check_called_by("__call__") def call_impl(self, builder, network): """ Args: @@ -387,6 +405,27 @@ def set_hardware_compatibility_level(): try_run(set_hardware_compatibility_level, "hardware_compatibility_level") + if self.version_compatible: + try_set_flag("VERSION_COMPATIBLE") + + if self.exclude_lean_runtime: + if not self.version_compatible: + G_LOGGER.critical(f"Cannot set EXCLUDE_LEAN_RUNTIME if version compatibility is not enabled. ") + try_set_flag("EXCLUDE_LEAN_RUNTIME") + + if self.hardware_compatibility_level is not None or self.version_compatible: + G_LOGGER.info( + "Version or hardware compatibility was enabled. " + "If you are using an ONNX model, please set the NATIVE_INSTANCENORM ONNX parser flag, e.g. 
`--onnx-flags NATIVE_INSTANCENORM`" + ) + + if self.max_aux_streams is not None: + + def set_max_aux_streams(): + config.max_aux_streams = self.max_aux_streams + + try_run(set_max_aux_streams, "max_aux_streams") + return config @@ -415,6 +454,7 @@ def __init__(self, config, func): self._func = func + @util.check_called_by("__call__") def call_impl(self, builder, network): """ Args: diff --git a/tools/Polygraphy/polygraphy/backend/trt/loader.py b/tools/Polygraphy/polygraphy/backend/trt/loader.py index f37fe42e..64bc9480 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/loader.py +++ b/tools/Polygraphy/polygraphy/backend/trt/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,6 +17,7 @@ import contextlib import ctypes import time +import os from polygraphy import constants, mod, util from polygraphy.backend.base import BaseLoader @@ -57,6 +58,7 @@ def __init__(self, plugins=None, obj=None): self.plugins = util.default(plugins, []) self.obj = obj + @util.check_called_by("__call__") def call_impl(self, *args, **kwargs): """ Returns: @@ -89,6 +91,7 @@ def __init__(self, explicit_batch=None): """ self.explicit_batch = util.default(explicit_batch, True) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -105,18 +108,31 @@ def call_impl(self): class BaseNetworkFromOnnx(BaseLoader): - def __init__(self, explicit_batch=None): + def __init__(self, explicit_batch=None, flags=None): """ Args: explicit_batch (bool): Whether to create the network with explicit batch mode. Defaults to True. + flags (List[trt.OnnxParserFlag]): + A list of ``OnnxParserFlag`` s to modify the default parsing + behavior of the ONNX parser. + Defaults to None. """ self.explicit_batch = util.default(explicit_batch, True) + self.flags = flags + @util.check_called_by("__call__") def call_impl(self): with util.FreeOnException(create_network(explicit_batch=self.explicit_batch)) as (builder, network): parser = trt.OnnxParser(network, trt_util.get_trt_logger()) + # Set flags if applicable + if mod.version(trt.__version__) >= mod.version("8.6"): + if self.flags: + masked_flags = 0 + for f in self.flags: + masked_flags |= 1 << int(f) + parser.flags = masked_flags return builder, network, parser @@ -126,17 +142,23 @@ class NetworkFromOnnxBytes(BaseNetworkFromOnnx): Functor that parses an ONNX model to create a trt.INetworkDefinition. """ - def __init__(self, model_bytes): + def __init__(self, model_bytes, flags=None): """ Parses an ONNX model. Args: model_bytes (Union[bytes, Callable() -> bytes]): A serialized ONNX model or a callable that returns one. + + flags (List[trt.OnnxParserFlag]) + A list of ``OnnxParserFlag`` s to modify the default parsing + behavior of the ONNX parser. + Defaults to None. """ - super().__init__() + super().__init__(flags=flags) self._model_bytes = model_bytes + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -157,16 +179,22 @@ class NetworkFromOnnxPath(BaseNetworkFromOnnx): This loader supports models with weights stored in an external location. """ - def __init__(self, path): + def __init__(self, path, flags=None): """ Parses an ONNX model from a file. Args: path (str): The path from which to load the model. 
+ + flags (List[trt.OnnxParserFlag]): + A list of ``OnnxParserFlag`` s to modify the default parsing + behavior of the ONNX parser. + Defaults to None. """ - super().__init__() + super().__init__(flags=flags) self.path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -222,6 +250,7 @@ def __init__(self, network, func, name=None): self._func = func self.name = util.default(name, func_name) + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -295,6 +324,7 @@ def _apply(network, layer_precisions): name="the network", items_name="layers", check_extra=False, + log_func=G_LOGGER.warning, ) for layer in network: @@ -332,6 +362,7 @@ def _apply(network, tensor_datatypes): name="the network", items_name="tensors", check_extra=False, + log_func=G_LOGGER.warning, ) for name, dtype in tensor_datatypes.items(): @@ -368,6 +399,7 @@ def _apply(network, tensor_formats): name="the network", items_name="tensors", check_extra=False, + log_func=G_LOGGER.warning, ) for name, formats in tensor_formats.items(): @@ -392,6 +424,35 @@ def __init__(self, network, tensor_formats): super().__init__(network, func, "SetTensorFormats") +@mod.export(funcify=True) +class LoadRuntime(BaseLoader): + """ + Functor that loads a TensorRT ``IRuntime``. + """ + + def __init__(self, path): + """ + Loads a TensorRT ``IRuntime``. + + The loaded runtime can be used to execute a version compatible engine + that excludes the lean runtime. + + Args: + path (str): The path to a shared library from which to load the runtime. + """ + self.path = path + + @util.check_called_by("__call__") + def call_impl(self): + """ + Returns: + trt.Runtime: The runtime that was loaded. + """ + with trt.Runtime(trt_util.get_trt_logger()) as bootstrap_runtime: + G_LOGGER.info(f"Loading TensorRT runtime from: {self.path}") + return bootstrap_runtime.load_runtime(self.path) + + @mod.export(funcify=True) class EngineBytesFromNetwork(BaseLoader): """ @@ -421,6 +482,7 @@ def __init__(self, network, config=None, save_timing_cache=None): self._config = util.default(config, CreateConfig()) self.timing_cache_path = save_timing_cache + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -516,10 +578,35 @@ def call_impl(self): @mod.export(funcify=True) class EngineFromNetwork(EngineBytesFromNetwork): """ - Similar to EngineBytesFromNetwork, but returns an ICudaEngine instance - instead of a serialized engine. + Functor similar to EngineBytesFromNetwork, but deserializes the engine before returning. """ + def __init__(self, network, config=None, save_timing_cache=None, runtime=None): + """ + Builds a TensorRT serialized engine and then deserializes it. + + Args: + network (Union[Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]], Callable() -> Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]]): + A tuple containing a TensorRT builder, network and optionally parser or a callable that returns one. + To omit the parser, return a tuple containing just the builder and network. + + + config (Callable(trt.Builder, trt.INetworkDefinition) -> trt.IBuilderConfig): + A TensorRT builder configuration or a callable that returns one. If not supplied, + a `CreateConfig` instance with default parameters is used. + save_timing_cache (Union[str, file-like]): + A path or file-like object at which to save a tactic timing cache. + Any existing cache will be appended to. 
+ If a path is provided, the file will be locked for exclusive access to prevent + multiple processes from attempting to update the timing cache at the same time. + runtime (Union[trt.Runtime, Callable() -> trt.Runtime]): + The runtime to use when deserializing the engine or a callable that returns one. + If no runtime is provided, one will be created. + """ + super().__init__(network, config, save_timing_cache) + self._runtime = runtime + + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -527,7 +614,7 @@ def call_impl(self): """ # We do not invoke super().call_impl here because we would otherwise be responsible # for freeing it's return values. - return engine_from_bytes(super().call_impl) + return engine_from_bytes(super().call_impl, runtime=self._runtime) @mod.export(funcify=True) @@ -536,25 +623,34 @@ class EngineFromBytes(BaseLoader): Functor that deserializes an engine from a buffer. """ - def __init__(self, serialized_engine): + def __init__(self, serialized_engine, runtime=None): """ Deserializes an engine from a buffer. Args: serialized_engine (Union[Union[str, bytes], Callable() -> Union[str, bytes]]): The serialized engine bytes or a callable that returns them. + runtime (Union[trt.Runtime, Callable() -> trt.Runtime]): + The runtime to use when deserializing the engine or a callable that returns one. + If no runtime is provided, one will be created. """ self._serialized_engine = serialized_engine + self._runtime = util.default(runtime, lambda: trt.Runtime(trt_util.get_trt_logger())) + @util.check_called_by("__call__") def call_impl(self): """ Returns: trt.ICudaEngine: The deserialized engine. """ buffer, owns_buffer = util.invoke_if_callable(self._serialized_engine) + runtime, owns_runtime = util.invoke_if_callable(self._runtime) trt.init_libnvinfer_plugins(trt_util.get_trt_logger(), "") - with contextlib.ExitStack() as stack, trt.Runtime(trt_util.get_trt_logger()) as runtime: + with contextlib.ExitStack() as stack: + if owns_runtime: + stack.enter_context(runtime) + if owns_buffer: try: buffer.__enter__ # IHostMemory is freed only in __exit__ @@ -563,6 +659,12 @@ def call_impl(self): else: stack.enter_context(buffer) + try: + # To deserialize version compatible engines, we must signal the runtime that host code is allowed + runtime.engine_host_code_allowed = True + except AttributeError: + pass + engine = runtime.deserialize_cuda_engine(buffer) if not engine: G_LOGGER.critical("Could not deserialize engine. See log for details.") @@ -585,6 +687,7 @@ def __init__(self, engine): """ self._engine = engine + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -614,12 +717,12 @@ def __init__(self, engine, path): engine (Union[trt.ICudaEngine, Callable() -> trt.ICudaEngine]): An engine or a callable that returns one. - path (str): The path at which to save the engine. """ self._engine = engine self.path = path + @util.check_called_by("__call__") def call_impl(self): """ Returns: @@ -657,6 +760,7 @@ def __init__(self, network) -> None: """ self._network = network + @util.check_called_by("__call__") def call_impl(self): """ Returns: diff --git a/tools/Polygraphy/polygraphy/backend/trt/profile.py b/tools/Polygraphy/polygraphy/backend/trt/profile.py index 2367ae3f..0c74315a 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/profile.py +++ b/tools/Polygraphy/polygraphy/backend/trt/profile.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
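Putting the new options together, a sketch of building and deserializing a version-compatible engine that excludes the lean runtime (requires TensorRT >= 8.6; the lean runtime library path is a placeholder):

```python
import tensorrt as trt

from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, LoadRuntime, NetworkFromOnnxPath

build_engine = EngineFromNetwork(
    # NATIVE_INSTANCENORM is the parser flag recommended above for compatible engines built from ONNX.
    NetworkFromOnnxPath("model.onnx", flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
    config=CreateConfig(version_compatible=True, exclude_lean_runtime=True, max_aux_streams=2),
    runtime=LoadRuntime("/path/to/libnvinfer_lean.so"),  # placeholder path
)
engine = build_engine()
```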
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/backend/trt/runner.py b/tools/Polygraphy/polygraphy/backend/trt/runner.py index df92b6d6..ec63d2d5 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/runner.py +++ b/tools/Polygraphy/polygraphy/backend/trt/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -82,9 +82,14 @@ def __init__(self, engine, name: str = None, optimization_profile: int = None): self._engine_or_context = engine self.optimization_profile = optimization_profile + # Check compatibility with NumPy before proceeding further + trt_util.check_numpy_trt_compatibility() + + @util.check_called_by("activate") def activate_impl(self): engine_or_context, owning = util.invoke_if_callable(self._engine_or_context) + if isinstance(engine_or_context, trt.ICudaEngine): self.engine = engine_or_context self.owns_engine = owning @@ -189,9 +194,10 @@ def set_profile(self, index: int): if not self.context.set_optimization_profile_async(index, self.stream.ptr): G_LOGGER.critical(f"Failed to set optimization profile to: {index}") + @util.check_called_by("get_input_metadata") def get_input_metadata_impl(self): if trt_util._should_use_v3_api(): - return trt_util.get_metadata_from_engine(self.engine, mode=trt.TensorIOMode.INPUT) + return trt_util.get_metadata_from_engine(self.engine, self.context, mode=trt.TensorIOMode.INPUT) else: start_binding, end_binding = trt_util.get_active_profile_bindings(self.context) # This function always uses binding names of the 0th profile. @@ -344,13 +350,19 @@ def _infer_impl_v3(self, feed_dict, copy_outputs_to_host): "Please provide either a NumPy array or Polygraphy DeviceView. " ) + # If the format is HWC, make sure array.shape is considered after transposing back to CHW + if self.engine.get_tensor_format(name) == trt.TensorFormat.HWC: + array_shape = trt_util.get_chw_shape_from_hwc(array.shape, self.context.get_tensor_strides(name)) + else: + array_shape = array.shape + # Only update the input shape/address if something has changed. Otherwise, we'd be # doing extra work unnecessarily. # We retrieve the semantic shape from the FormattedArray, *not* the underlying array. - if self.context.get_tensor_shape(name) != array.shape: - G_LOGGER.ultra_verbose(f"Setting {name} input shape to: {array.shape}") - if not self.context.set_input_shape(name, array.shape): - G_LOGGER.critical(f"For input: {name}, failed to set shape to: {array.shape}") + if self.context.get_tensor_shape(name) != array_shape: + G_LOGGER.ultra_verbose(f"Setting {name} input shape to: {array_shape}") + if not self.context.set_input_shape(name, array_shape): + G_LOGGER.critical(f"For input: {name}, failed to set shape to: {array_shape}") if self.context.get_tensor_address(name) != ptr: if not self.context.set_tensor_address(name, ptr): @@ -365,20 +377,26 @@ def _infer_impl_v3(self, feed_dict, copy_outputs_to_host): # Otherwise, we create a view instead with the correct shape/dtype. 
raw_array = self.output_allocator.buffers[name] shape = self.output_allocator.shapes[name] - dtype = np.dtype(trt.nptype(self.engine.get_tensor_dtype(name))) + dtype = trt_util.np_dtype_from_trt(self.engine.get_tensor_dtype(name)) + + tensor_format = self.engine.get_tensor_format(name) + + # If the format is HWC, make sure the result is shaped accordingly + if tensor_format == trt.TensorFormat.HWC: + shape = trt_util.get_hwc_shape_from_chw(shape, self.context.get_tensor_strides(name)) - using_nonlinear_format = self.engine.get_tensor_format(name) != trt.TensorFormat.LINEAR + using_vectorized_format = tensor_format != trt.TensorFormat.LINEAR and tensor_format != trt.TensorFormat.HWC # The memory allocated by the output allocator may be larger than actually required. # If we're using a vectorized format, then we need to copy the whole thing. # Otherwise, we can determine how much we actually need. - nbytes = raw_array.nbytes if using_nonlinear_format else (util.volume(shape) * dtype.itemsize) + nbytes = raw_array.nbytes if using_vectorized_format else (util.volume(shape) * dtype.itemsize) if copy_outputs_to_host: self.host_output_buffers[name] = util.resize_buffer(self.host_output_buffers[name], (nbytes,)) raw_array.view(shape=(nbytes,)).copy_to(self.host_output_buffers[name], stream=self.stream) raw_array = self.host_output_buffers[name] - if using_nonlinear_format: + if using_vectorized_format: array = FormattedArray(raw_array, shape=shape, dtype=dtype) else: if copy_outputs_to_host: @@ -390,6 +408,7 @@ def _infer_impl_v3(self, feed_dict, copy_outputs_to_host): self.stream.synchronize() return output_buffers + @util.check_called_by("infer") def infer_impl(self, feed_dict, copy_outputs_to_host=None): """ Implementation for running inference with TensorRT. @@ -427,6 +446,7 @@ def infer_impl(self, feed_dict, copy_outputs_to_host=None): return output_buffers + @util.check_called_by("deactivate") def deactivate_impl(self): with contextlib.ExitStack() as stack: if self.owns_engine: diff --git a/tools/Polygraphy/polygraphy/backend/trt/util.py b/tools/Polygraphy/polygraphy/backend/trt/util.py index 7cd7cbbf..de6f9acb 100644 --- a/tools/Polygraphy/polygraphy/backend/trt/util.py +++ b/tools/Polygraphy/polygraphy/backend/trt/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -157,10 +157,19 @@ def try_add(layer_type, layer_cls): return layer_class_mapping +def check_numpy_trt_compatibility(): + if mod.version(trt.__version__) < mod.version("8.6") and \ + mod.version(np.__version__) >= mod.version("1.24"): + # TensorRT < 8.6 uses a deprecated alias np.bool that was removed in NumPy >= 1.24 + G_LOGGER.warning(f"TensorRT version {trt.__version__} and NumPy version {np.__version__} " + "are not compatible. Consider downgrading your NumPy package to a version < 1.24 " + "or upgrading TensorRT to a version >= 8.6.", mode=LogMode.ONCE) + def np_dtype_from_trt(trt_dtype): # trt.nptype uses NumPy, so to make autoinstall work, we need to trigger it before that. 
mod.autoinstall(np) + check_numpy_trt_compatibility() return np.dtype(trt.nptype(trt_dtype)) @@ -553,18 +562,35 @@ def try_setup_polygraphy_calibrator(config, network, calib_profile=None): calibrator.set_input_metadata(input_metadata) -def get_metadata_from_engine(engine, mode): +def get_hwc_shape_from_chw(shape, strides): + # The relative size (descending sorted order) of the strides should give the permutation to convert the shape + perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True) + return tuple([shape[i] for i in perm]) + + +def get_chw_shape_from_hwc(shape, strides): + perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True) + inv_perm = sorted(range(len(perm)), key=perm.__getitem__) + return tuple([shape[i] for i in inv_perm]) + + +def get_metadata_from_engine(engine, context, mode): meta = TensorMetadata() for idx in range(engine.num_io_tensors): name = engine.get_tensor_name(idx) if engine.get_tensor_mode(name) != mode: continue - meta.add(name=name, dtype=np_dtype_from_trt(engine.get_tensor_dtype(name)), shape=engine.get_tensor_shape(name)) + shape = engine.get_tensor_shape(name) + # If the input format is HWC, make sure the input is shaped accordingly + if engine.get_tensor_format(name) == trt.TensorFormat.HWC: + shape = get_hwc_shape_from_chw(shape, context.get_tensor_strides(name)) + + meta.add(name=name, dtype=np_dtype_from_trt(engine.get_tensor_dtype(name)), shape=shape) return meta -def str_from_engine(engine, show_layers=None, show_attrs=None): +def str_from_engine(engine, context, show_layers=None, show_attrs=None): show_layers = util.default(show_layers, False) show_attrs = util.default(show_attrs, False) @@ -578,8 +604,8 @@ def str_from_engine(engine, show_layers=None, show_attrs=None): # Show metadata for the first profile (i.e. the dynamic shapes) if _should_use_v3_api(): - input_metadata = get_metadata_from_engine(engine, mode=trt.TensorIOMode.INPUT) - output_metadata = get_metadata_from_engine(engine, mode=trt.TensorIOMode.OUTPUT) + input_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.INPUT) + output_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.OUTPUT) else: input_metadata = get_input_metadata_from_engine(engine, 0, num_io_tensors) output_metadata = get_output_metadata_from_engine(engine, 0, num_io_tensors) diff --git a/tools/Polygraphy/polygraphy/backend/trt_legacy.py b/tools/Polygraphy/polygraphy/backend/trt_legacy.py index 9621ebb4..09162d20 100644 --- a/tools/Polygraphy/polygraphy/backend/trt_legacy.py +++ b/tools/Polygraphy/polygraphy/backend/trt_legacy.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
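# NOTE: Illustrative sketch, not part of the patch. It demonstrates the stride-based
# permutation used by get_hwc_shape_from_chw()/get_chw_shape_from_hwc() above.
# The shape and strides are made-up example values for a CHW tensor stored in HWC order.
def hwc_shape_from_chw(shape, strides):
    # Sorting dimensions by descending stride recovers the memory-order permutation.
    perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)
    return tuple(shape[i] for i in perm)

# shape (C, H, W) = (3, 4, 5) laid out as HWC has element strides (1, 15, 3):
# H varies slowest (stride 15), then W (stride 3), then C (stride 1).
print(hwc_shape_from_chw((3, 4, 5), (1, 15, 3)))  # -> (4, 5, 3)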
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,6 +40,7 @@ def __init__(self, path, shapes, outputs): self.shapes = shapes self.outputs = outputs + @util.check_called_by("__call__") def call_impl(self): input_names = list(self.shapes.keys()) input_shapes = list(self.shapes.values()) @@ -53,6 +54,7 @@ def __init__(self, tf_loader, save_uff=None, preprocessor=None): self.uff_path = save_uff self.preprocessor = preprocessor + @util.check_called_by("__call__") def call_impl(self): """ @@ -87,6 +89,7 @@ def __init__(self, uff_loader, uff_order=None): if uff_order: self.uff_order = trt.UffInputOrder.NCHW if uff_order.lower() == "nchw" else trt.UffInputOrder.NHWC + @util.check_called_by("__call__") def call_impl(self): uff_model, input_names, input_shapes, output_names = self.uff_loader() @@ -130,6 +133,7 @@ def __init__(self, onnx_loader): super().__init__(explicit_batch=False) self.onnx_loader = onnx_loader + @util.check_called_by("__call__") def call_impl(self): from polygraphy.backend.onnx import util as onnx_util @@ -180,7 +184,8 @@ def _input_metadata_from_network(network): input_metadata = TensorMetadata() for index in range(network.num_inputs): tensor = network.get_input(index) - input_metadata.add(name=tensor.name, dtype=np.dtype(trt.nptype(tensor.dtype)), shape=tensor.shape) + dtype = trt_util.np_dtype_from_trt(tensor.dtype) + input_metadata.add(name=tensor.name, dtype=dtype, shape=tensor.shape) return input_metadata @@ -226,7 +231,7 @@ def __init__( max_workspace_size (int): The maximum workspace size. max_batch_size (int): The maximum batch size. fp16 (bool): Whether to run in fp16 mode - fp8 (bool): Whether to run in fp8 mode + fp8 (bool): Whether to run in fp8 mode layerwise (bool): Whether to retrieve the outputs of every layer in the network. name (str): The human-readable name prefix to use for this runner. @@ -249,7 +254,7 @@ def __init__( self.network_loader = network_loader self.max_workspace_size = util.default(max_workspace_size, 1 << 24) self.fp16 = util.default(fp16, False) - self.fp8 = util.default(fp8, False) + self.fp8 = util.default(fp8, False) self.tf32 = util.default(tf32, False) self.load_engine = load_engine @@ -262,6 +267,9 @@ def __init__( self.use_dla = use_dla self.allow_gpu_fallback = allow_gpu_fallback + # Check compatibility with NumPy before proceeding further + trt_util.check_numpy_trt_compatibility() + def activate_impl(self): """ Parses command-line arguments and populates the following attributes: diff --git a/tools/Polygraphy/polygraphy/common/interface.py b/tools/Polygraphy/polygraphy/common/interface.py index ba7e23c2..0b368015 100644 --- a/tools/Polygraphy/polygraphy/common/interface.py +++ b/tools/Polygraphy/polygraphy/common/interface.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/common/struct.py b/tools/Polygraphy/polygraphy/common/struct.py index 0d01c9f5..c57b1bbf 100644 --- a/tools/Polygraphy/polygraphy/common/struct.py +++ b/tools/Polygraphy/polygraphy/common/struct.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # limitations under the License. # -from polygraphy import mod +from polygraphy import mod, util from polygraphy.common.interface import TypedDict from polygraphy.json import Decoder, Encoder, add_json_methods @@ -122,10 +122,7 @@ def add(self, name, dtype, shape, min_shape=None, max_shape=None): def __repr__(self): ret = "TensorMetadata()" for name, (dtype, shape) in self.items(): - shape_str = f"{shape}" - if shape is not None: - shape_str = f"{list(shape)}, min_shape={shape.min}, max_shape={shape.max}" - ret += f".add('{name}', {dtype}, {shape_str})" + ret += util.make_repr(".add", name, dtype, list(shape), min_shape=shape.min, max_shape=shape.max)[0] return ret def __str__(self): diff --git a/tools/Polygraphy/polygraphy/comparator/comparator.py b/tools/Polygraphy/polygraphy/comparator/comparator.py index 9a8ff939..4be7ca80 100644 --- a/tools/Polygraphy/polygraphy/comparator/comparator.py +++ b/tools/Polygraphy/polygraphy/comparator/comparator.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/comparator/compare.py b/tools/Polygraphy/polygraphy/comparator/compare.py index a7e642bf..8bd7d7be 100644 --- a/tools/Polygraphy/polygraphy/comparator/compare.py +++ b/tools/Polygraphy/polygraphy/comparator/compare.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,9 +117,10 @@ def run_comparison(func, fail_fast, iter_result0, iter_result1, find_output_func G_LOGGER.start( f"Comparing Output: '{out0_name}' (dtype={output0.dtype}, shape={output0.shape}) with '{out1_name}' (dtype={output1.dtype}, shape={output1.shape})" ) - output_status[out0_name] = func(out0_name, output0, out1_name, output1) - if fail_fast and not output_status[out0_name]: - return output_status + with G_LOGGER.indent(): + output_status[out0_name] = func(out0_name, output0, out1_name, output1) + if fail_fast and not output_status[out0_name]: + return output_status mismatched_output_names = [name for name, matched in output_status.items() if not matched] if mismatched_output_names: @@ -156,6 +157,10 @@ def simple( find_output_func=None, check_error_stat=None, infinities_compare_equal=None, + save_heatmaps=None, + show_heatmaps=None, + save_error_metrics_plot=None, + show_error_metrics_plot=None, ): """ Creates a function that compares two IterationResults, and can be used as the `compare_func` argument @@ -207,6 +212,17 @@ def simple( If True, then matching +-inf values in the output have an absdiff of 0. If False, then matching +-inf values in the output have an absdiff of NaN. Defaults to False. + save_heatmaps (str): + [EXPERIMENTAL] Path to a directory in which to save figures of heatmaps of the absolute and relative error. + Defaults to None. 
+ show_heatmaps (bool): + [EXPERIMENTAL] Whether to display heatmaps of the absolute and relative error. + Defaults to False. + save_error_metrics_plot (str): + [EXPERIMENTAL] Path to a directory in which to save the error metrics plots. + Defaults to None. + show_error_metrics_plot (bool): + [EXPERIMENTAL] Whether to display the error metrics plot. Returns: Callable(IterationResult, IterationResult) -> OrderedDict[str, OutputCompareResult]: @@ -222,6 +238,8 @@ def simple( default_error_stat = "elemwise" check_error_stat = util.default(check_error_stat, default_error_stat) infinities_compare_equal = util.default(infinities_compare_equal, False) + show_heatmaps = util.default(show_heatmaps, False) + show_error_metrics_plot = util.default(show_error_metrics_plot, False) def check_outputs_match( out0, @@ -274,13 +292,26 @@ def check_outputs_match( cond = np.logical_and(out0_infinite, out0 == out1) absdiff = np.where(cond, 0, absdiff) - with np.testing.suppress_warnings() as sup: - sup.filter(RuntimeWarning) - reldiff = absdiff / comp_util.cast_up(np.abs(out1)) - max_reldiff = comp_util.compute_max(reldiff) - mean_reldiff = comp_util.compute_mean(reldiff) - median_reldiff = comp_util.compute_median(reldiff) + # Add a small epsilon (2e-16) to zero values in the array to prevent NaN in relative error. + cast_up_out1 = comp_util.cast_up(out1) + + if np.issubdtype(cast_up_out1.dtype, np.floating): + if np.any(cast_up_out1 == 0): + G_LOGGER.warning( + f"{runner1_name:35} | Output: {out1_name}: Some values are 0. " + f"Will add a small epsilon quantity to these when computing relative difference. " + f"Note that this may cause some relative differences to be extremely high. ", + mode=LogMode.ONCE, + ) + cast_up_out1[cast_up_out1 == 0] += np.finfo(float).eps + + reldiff = absdiff / np.abs(cast_up_out1) + min_reldiff = comp_util.compute_min(reldiff) + max_reldiff = comp_util.compute_max(reldiff) + mean_reldiff = comp_util.compute_mean(reldiff) + median_reldiff = comp_util.compute_median(reldiff) + min_absdiff = comp_util.compute_min(absdiff) max_absdiff = comp_util.compute_max(absdiff) mean_absdiff = comp_util.compute_mean(absdiff) median_absdiff = comp_util.compute_median(absdiff) @@ -337,10 +368,40 @@ def req_tol(mean_diff, median_diff, max_diff): msg += " (requirements may be lower if both abs/rel tolerances are set)" G_LOGGER.info(msg) + if save_error_metrics_plot or show_error_metrics_plot: + with G_LOGGER.indent(): + comp_util.scatter_plot_error_magnitude( + absdiff, + reldiff, + comp_util.cast_up(out1), + min_reldiff, + max_reldiff, + runner0_name, + runner1_name, + out0_name, + out1_name, + save_dir=save_error_metrics_plot, + show=show_error_metrics_plot, + ) + + def build_heatmaps(diff, min_diff, max_diff, prefix, use_lognorm=None): + if save_heatmaps or show_heatmaps: + with G_LOGGER.indent(): + comp_util.build_heatmaps( + diff, + min_diff, + max_diff, + prefix=f"{prefix} Error | {out0_name}", + save_dir=save_heatmaps, + show=show_heatmaps, + use_lognorm=use_lognorm, + ) + comp_util.log_output_stats(absdiff, failed, "Absolute Difference") - with np.testing.suppress_warnings() as sup: - sup.filter(RuntimeWarning) - comp_util.log_output_stats(reldiff, failed, "Relative Difference") + build_heatmaps(absdiff, min_absdiff, max_absdiff, "Absolute") + + comp_util.log_output_stats(reldiff, failed, "Relative Difference") + build_heatmaps(reldiff, min_reldiff, max_reldiff, "Relative", use_lognorm=True) G_LOGGER.extra_verbose( f"Finished comparing: '{out0_name}' (dtype={out0.dtype}, shape={out0.shape}) 
[{runner0_name}] and '{out1_name}' (dtype={out1.dtype}, shape={out1.shape}) [{runner1_name}]" @@ -393,46 +454,44 @@ def match(out0_name, output0, out1_name, output1): per_out_rtol = util.value_or_from_dict(rtol, out0_name, default_rtol) per_out_err_stat = util.value_or_from_dict(check_error_stat, out0_name, default_error_stat) - with G_LOGGER.indent(): - G_LOGGER.info( - f"Tolerance: [abs={per_out_atol:.5g}, rel={per_out_rtol:.5g}] | Checking {per_out_err_stat} error" - ) - G_LOGGER.extra_verbose(f"Note: Comparing {iter_result0.runner_name} vs. {iter_result1.runner_name}") + G_LOGGER.info( + f"Tolerance: [abs={per_out_atol:.5g}, rel={per_out_rtol:.5g}] | Checking {per_out_err_stat} error" + ) + G_LOGGER.extra_verbose(f"Note: Comparing {iter_result0.runner_name} vs. {iter_result1.runner_name}") - if check_shapes and output0.shape != output1.shape: - G_LOGGER.error( - f"Will not compare outputs of different shapes. Note: Output shapes are {output0.shape} and {output1.shape}." - ) - G_LOGGER.error( - "Note: Use --no-shape-check or set check_shapes=False to " - "attempt to compare values anyway.", - mode=LogMode.ONCE, - ) - outputs_matched = False - else: - output1 = util.try_match_shape(output1, output0.shape) - output0 = output0.reshape(output1.shape) - outputs_matched = check_outputs_match( - output0, - out0_name, - output1, - out1_name, - per_out_rtol=per_out_rtol, - per_out_atol=per_out_atol, - per_out_err_stat=per_out_err_stat, - runner0_name=iter_result0.runner_name, - runner1_name=iter_result1.runner_name, - ) + if check_shapes and output0.shape != output1.shape: + G_LOGGER.error( + f"Will not compare outputs of different shapes. Note: Output shapes are {output0.shape} and {output1.shape}." + ) + G_LOGGER.error( + "Note: Use --no-shape-check or set check_shapes=False to " "attempt to compare values anyway.", + mode=LogMode.ONCE, + ) + outputs_matched = False + else: + output1 = util.try_match_shape(output1, output0.shape) + output0 = output0.reshape(output1.shape) + outputs_matched = check_outputs_match( + output0, + out0_name, + output1, + out1_name, + per_out_rtol=per_out_rtol, + per_out_atol=per_out_atol, + per_out_err_stat=per_out_err_stat, + runner0_name=iter_result0.runner_name, + runner1_name=iter_result1.runner_name, + ) - # Finally show summary. - if not outputs_matched: - G_LOGGER.error( - f"FAILED | Output: '{out0_name}' | Difference exceeds tolerance (rel={per_out_rtol}, abs={per_out_atol})" - ) - else: - G_LOGGER.finish( - f"PASSED | Output: '{out0_name}' | Difference is within tolerance (rel={per_out_rtol}, abs={per_out_atol})" - ) + # Finally show summary. + if not outputs_matched: + G_LOGGER.error( + f"FAILED | Output: '{out0_name}' | Difference exceeds tolerance (rel={per_out_rtol}, abs={per_out_atol})" + ) + else: + G_LOGGER.finish( + f"PASSED | Output: '{out0_name}' | Difference is within tolerance (rel={per_out_rtol}, abs={per_out_atol})" + ) return outputs_matched @@ -448,7 +507,7 @@ def match(out0_name, output0, out1_name, output1): def indices(index_tolerance=None, fail_fast=None): """ Creates a function that compares two IterationResults containing indices, and can be used as the `compare_func` argument - in ``Comparator.compare_accuracy``. + in ``Comparator.compare_accuracy``. This can be useful to compare, for example, the outputs of a Top-K operation. Outputs with more than one dimension are treated like multiple batches of values. For example, an output of shape (3, 4, 5, 10) would be treated like 60 batches (3 x 4 x 5) of 10 values each. 
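# NOTE: Illustrative sketch, not part of the patch. It mirrors the epsilon handling added
# to check_outputs_match() earlier in this file: zeros in the reference output are nudged
# by machine epsilon so the relative difference stays finite instead of becoming NaN/inf.
# The arrays below are made-up example values.
import numpy as np

out0 = np.array([1.0, 0.5, 0.0])
out1 = np.array([1.0, 0.0, 0.0])  # reference output containing zeros

absdiff = np.abs(out0 - out1)
denom = np.abs(out1.astype(np.float64))
denom[denom == 0] += np.finfo(float).eps  # same epsilon trick as the code above
reldiff = absdiff / denom
print(reldiff)  # [0.0, ~2.25e15, 0.0] -- very large, but finite, for the zero-valued element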
@@ -462,11 +521,19 @@ def indices(index_tolerance=None, fail_fast=None): output0 = [0, 1, 2] output1 = [1, 0, 2] - With an index tolerance of 0, this would be considered a mismatch. However, with an index tolerance - of 1, it would pass since the mismatched values, 0 and 1, are only one spot apart. + With an index tolerance of 0, this would be considered a mismatch, since the positions of `0` and `1` + are flipped between the two outputs. However, with an index tolerance of 1, it would pass since + the mismatched values are only 1 spot apart. If instead the outputs were: + :: + + output0 = [0, 1, 2] + output1 = [1, 2, 0] + + Then we would require an index tolerance of 2, since the `0` value in the two outputs is 2 spots apart. When this value is set, the final 'index_tolerance' number of values are ignored for each batch. For example, with an index tolerance of 1, mismatches in the final element are not considered. + If used with a Top-K output, you can compensate for this by instead using a Top-(K + index_tolerance). This can be provided on a per-output basis using a dictionary. In that case, use an empty string ("") as the key to specify default tolerance for outputs not explicitly listed. @@ -529,6 +596,8 @@ def match(out0_name, output0, out1_name, output1): if index1.size < 1: G_LOGGER.error(f"FAILED | Value: {val0} not found in output") passed = False + if fail_fast: + return False continue index1 = index1[0] @@ -536,6 +605,8 @@ def match(out0_name, output0, out1_name, output1): if abs(index1 - index0) > per_out_index_tol: G_LOGGER.error(f"FAILED | Difference exceeds index tolerance ({per_out_index_tol})") passed = False + if fail_fast: + return False continue # Log information about the outputs @@ -550,7 +621,8 @@ def match(out0_name, output0, out1_name, output1): output1, not passed, f"{iter_result1.runner_name}: {out1_name}", hist_range=hist_bin_range ) - G_LOGGER.finish(f"PASSED | Difference is within index tolerance ({per_out_index_tol})") + if passed: + G_LOGGER.finish(f"PASSED | Difference is within index tolerance ({per_out_index_tol})") return passed return run_comparison( diff --git a/tools/Polygraphy/polygraphy/comparator/data_loader.py b/tools/Polygraphy/polygraphy/comparator/data_loader.py index 66e2bd5c..332df866 100644 --- a/tools/Polygraphy/polygraphy/comparator/data_loader.py +++ b/tools/Polygraphy/polygraphy/comparator/data_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/comparator/postprocess.py b/tools/Polygraphy/polygraphy/comparator/postprocess.py index 616262a3..ce49e906 100644 --- a/tools/Polygraphy/polygraphy/comparator/postprocess.py +++ b/tools/Polygraphy/polygraphy/comparator/postprocess.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
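# NOTE: Illustrative, simplified sketch, not part of the patch. It captures the
# index-tolerance rule described in the indices() docstring above, but omits the
# per-batch handling and the exclusion of the final `index_tolerance` values that
# the real implementation performs.
import numpy as np

def indices_match(out0, out1, index_tolerance):
    out1 = np.asarray(out1)
    for index0, val0 in enumerate(out0):
        found = np.argwhere(out1 == val0)
        if found.size < 1:
            return False  # Value missing entirely
        if abs(int(found.flatten()[0]) - index0) > index_tolerance:
            return False  # Value present, but too far from its expected position
    return True

print(indices_match([0, 1, 2], [1, 0, 2], index_tolerance=1))  # True
print(indices_match([0, 1, 2], [1, 2, 0], index_tolerance=1))  # False: the `0` moved 2 spots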
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/comparator/struct.py b/tools/Polygraphy/polygraphy/comparator/struct.py index ac6f6b9d..88fb3881 100644 --- a/tools/Polygraphy/polygraphy/comparator/struct.py +++ b/tools/Polygraphy/polygraphy/comparator/struct.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -118,7 +118,7 @@ def __init__(self, outputs=None, runtime=None, runner_name=None): If this is omitted, a default name is generated. """ if outputs and config.ARRAY_SWAP_THRESHOLD_MB < 0: - total_size_gb = sum(arr.nbytes for arr in outputs.values() if isinstance(arr, np.ndarray)) / (1024.0 ** 3) + total_size_gb = sum(arr.nbytes for arr in outputs.values() if isinstance(arr, np.ndarray)) / (1024.0**3) if total_size_gb >= 1: G_LOGGER.warning( f"It looks like the outputs of this network are very large ({total_size_gb:.3f} GiB).\n" diff --git a/tools/Polygraphy/polygraphy/comparator/util.py b/tools/Polygraphy/polygraphy/comparator/util.py index 267d13fd..4207297c 100644 --- a/tools/Polygraphy/polygraphy/comparator/util.py +++ b/tools/Polygraphy/polygraphy/comparator/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,11 +15,18 @@ # limitations under the License. # import functools +import math +import os -from polygraphy import mod, util, config +from polygraphy import config, mod, util from polygraphy.logger import G_LOGGER +import math +import os + np = mod.lazy_import("numpy") +plt = mod.lazy_import("matplotlib.pyplot") +matplotlib = mod.lazy_import("matplotlib") def cast_up(buffer): @@ -151,7 +158,214 @@ def log_output_stats(output, info_hist=False, runner_name=None, hist_range=None) ret = str_output_stats(output, runner_name) G_LOGGER.info(ret) with G_LOGGER.indent(): - # Show histogram on failures. + # For small outputs, show the entire output instead of just a histogram. + SMALL_OUTPUT_THRESHOLD = 100 + if output.size <= SMALL_OUTPUT_THRESHOLD: + G_LOGGER.log( + lambda: f"---- Values ----\n{util.indent_block(output)}", + severity=G_LOGGER.INFO if info_hist else G_LOGGER.VERBOSE, + ) G_LOGGER.log( lambda: str_histogram(output, hist_range), severity=G_LOGGER.INFO if info_hist else G_LOGGER.VERBOSE ) + + +def build_heatmaps(arr, min_val, max_val, prefix, save_dir=None, show=None, use_lognorm=None): + """ + Display an array as an image or set of images. The last two dimensions are interpreted as + the height and width and the leading dimensions are flattened and treated as the number + of images to display. + + Args: + arr (np.ndarray): The input array + min_val (float): The minimum value in the input array + max_val (float): The maximum value in the input array + prefix (str): The prefix to use when displaying titles for figures. + save_dir (Optional[str]): Path to a directory in which to save images of the heatmaps. + show (Optional[bool]): Whether to display the heatmap. 
+ use_lognorm (bool): Whether to use LogNorm instead of Normalize when displaying values. + """ + G_LOGGER.start(f"Building heatmaps for {prefix}. This may take a while...") + with G_LOGGER.indent(): + MAX_HEIGHT = 1080 + MAX_WIDTH = 1920 + MAX_NUM_ROWS = 14 + MAX_NUM_COLS = 7 + FONT_SIZE = "xx-small" + + if len(arr.shape) < 3: + arr = np.expand_dims(arr, tuple(range(3 - len(arr.shape)))) + + original_shape = arr.shape + arr = arr.reshape(-1, arr.shape[-2], arr.shape[-1]) + + num_images = arr.shape[0] + + def coord_str_from_img_idx(img_idx): + coord = [] + for dim in reversed(original_shape[:-2]): + coord.insert(0, img_idx % dim) + img_idx //= dim + return f"({','.join(map(str, coord))},0:{arr.shape[-2]},0:{arr.shape[-1]})" + + # We treat each 2D slice of the array as a separate image. + # Multiple images may be displayed on a single figure (in a grid) and we may have multiple figures. + num_rows = min(MAX_HEIGHT // arr.shape[-2], MAX_NUM_ROWS) + num_cols = min(MAX_WIDTH // arr.shape[-1], MAX_NUM_COLS) + + # Remove any excess images per figure + if num_images < num_rows * num_cols: + num_cols = min(num_images, num_cols) + num_rows = math.ceil(num_images / num_cols) + + num_images_per_figure = num_rows * num_cols + num_figures = math.ceil(num_images / num_images_per_figure) + + # Populate each image in each figure. + for fig_idx in range(num_figures): + fig, axs = plt.subplots(num_rows, num_cols, squeeze=False, dpi=200, constrained_layout=True) + base_img_idx = fig_idx * num_images_per_figure + + try: + # When the error is all the same, we can't use LogNorm. + if use_lognorm and min_val != max_val: + norm = matplotlib.colors.LogNorm(vmin=min_val, vmax=max_val) + prefix += " (Log Scale)" + else: + norm = matplotlib.colors.Normalize(vmin=min_val, vmax=max_val) + + fig_title = f"{prefix}: {coord_str_from_img_idx(base_img_idx)} to {coord_str_from_img_idx(min(base_img_idx + num_images_per_figure, num_images) - 1)}" + fig.suptitle(fig_title, fontsize=FONT_SIZE) + + G_LOGGER.extra_verbose(f"Building heatmaps for {fig_title}") + + images = [] + for row in range(num_rows): + for col in range(num_cols): + img_idx = base_img_idx + (col + row * num_cols) + + ax = axs[row, col] + ax.set_axis_off() + + if img_idx < arr.shape[0]: + img = arr[img_idx] + title = f"{coord_str_from_img_idx(img_idx)}" + else: + img = np.zeros(shape=(arr.shape[-2:])) + title = "Out Of Bounds" + ax.set_title(title, fontsize=FONT_SIZE) + + images.append(ax.imshow(img, cmap="plasma", filternorm=False, resample=False)) + + for im in images: + im.set_norm(norm) + + fig.colorbar(images[0], ax=axs, shrink=0.7) + + if save_dir is not None: + path = os.path.join(save_dir, f"{fig_title}.svg") + util.makedirs(path) + G_LOGGER.info(f"Saving '{prefix}' heatmap to: '{path}'") + fig.savefig(path) + + if show: + plt.show() + finally: + plt.close(fig) + + +def scatter_plot_error_magnitude( + absdiff, + reldiff, + reference_output, + min_reldiff, + max_reldiff, + runner0_name, + runner1_name, + out0_name, + out1_name, + save_dir=None, + show=None, +): + """ + Display a plot of absolute/relative difference against the magnitude of the output. + + Args: + absdiff (np.ndarray): The absolute difference. + reldiff (np.ndarray): The relative difference. + reference_output (np.ndarray): The output to consider as the reference output. + min_reldiff (float): The minimum relative difference + max_reldiff (float): The maximum relative difference + runner0_name (str): The name of the first runner. + runner1_name (str): The name of the second runner. 
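# NOTE: Minimal, self-contained sketch, not part of the patch. It shows the norm selection
# used by build_heatmaps() above: LogNorm is only usable when the error values are not all
# identical; otherwise a linear Normalize is used. The random data stands in for a real
# error array.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

err = np.abs(np.random.rand(8, 8) - np.random.rand(8, 8)) + 1e-9  # avoid exact zeros for LogNorm
min_val, max_val = err.min(), err.max()

if min_val != max_val:
    norm = matplotlib.colors.LogNorm(vmin=min_val, vmax=max_val)
else:
    norm = matplotlib.colors.Normalize(vmin=min_val, vmax=max_val)

fig, ax = plt.subplots()
im = ax.imshow(err, cmap="plasma", norm=norm)
fig.colorbar(im, ax=ax)
plt.close(fig)  # Saving/showing omitted in this sketch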
+ out0_name (str): The name of the output of the first runner. + out1_name (str): The name of the output of the second runner. + save_dir (Optional[str]): Path to a directory in which to save images of the plots. + show (Optional[bool]): Whether to display the error metrics plot. + """ + G_LOGGER.start(f"Building error metrics plot for {out0_name}. This may take a while...") + with G_LOGGER.indent(): + title = f"Error metrics between output0 and output1\noutput0: {runner0_name:35} | {out0_name}\noutput1: {runner1_name:35} | {out1_name}" + fname = f"error_metrics_{out0_name}.png" + TICK_FONT_SIZE = 6 + TITLE_FONT_SIZE = 7 + NUM_X_TICKS = 20 + NUM_Y_LINEAR_TICKS = 10 + + def set_ax_properties(ax): + ax.tick_params(axis="x", labelrotation=90) + ax.tick_params(axis="both", labelsize=TICK_FONT_SIZE) + ax.grid(linestyle="--") + ax.xaxis.label.set_fontsize(TITLE_FONT_SIZE) + ax.yaxis.label.set_fontsize(TITLE_FONT_SIZE) + + def set_linear_ax(ax): + xticks = ax.get_xticks() + yticks = ax.get_yticks() + ax.set_xticks(np.linspace(0, xticks[-1], NUM_X_TICKS)) + ax.set_yticks(np.linspace(0, yticks[-1], NUM_Y_LINEAR_TICKS)) + set_ax_properties(ax) + + def set_log_ax(ax, min_diff, max_diff): + ax.set_yscale("log") + xticks = ax.get_xticks() + + yrange = np.log10(np.array([min_diff, max_diff])) + yrange[0] = math.floor(yrange[0]) + yrange[1] = math.ceil(yrange[1]) + + ax.set_xticks(np.linspace(0, xticks[-1], NUM_X_TICKS)) + ax.set_yticks(np.power(10, np.arange(yrange[0], yrange[1], 1))) + set_ax_properties(ax) + + magnitude = np.abs(reference_output) + fig, axs = plt.subplots(2, sharex=True, constrained_layout=True) + + try: + fig.suptitle(title, fontsize=TITLE_FONT_SIZE) + + axs[0].scatter(magnitude, absdiff, s=1) + axs[0].set(ylabel="Absolute error") + set_linear_ax(axs[0]) + + axs[1].scatter(magnitude, reldiff, s=1) + label_suffix = "" + # When the range of the data is 0, we can't use log scale. + if min_reldiff != max_reldiff: + set_log_ax(axs[1], min_reldiff, max_reldiff) + label_suffix = " (log scale)" + else: + set_linear_ax(axs[1]) + axs[1].set(xlabel="output1 magnitude", ylabel=f"Relative error{label_suffix}") + + if save_dir is not None: + path = os.path.join(save_dir, fname) + util.makedirs(path) + G_LOGGER.info(f"Saving error metrics plot to: '{path}'") + fig.savefig(path, dpi=1200, bbox_inches="tight") + + if show: + plt.show() + + finally: + plt.close(fig) diff --git a/tools/Polygraphy/polygraphy/config.py b/tools/Polygraphy/polygraphy/config.py index c476b43c..b5d7345d 100644 --- a/tools/Polygraphy/polygraphy/config.py +++ b/tools/Polygraphy/polygraphy/config.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/constants.py b/tools/Polygraphy/polygraphy/constants.py index 902a6753..a8dcc20e 100644 --- a/tools/Polygraphy/polygraphy/constants.py +++ b/tools/Polygraphy/polygraphy/constants.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/cuda/cuda.py b/tools/Polygraphy/polygraphy/cuda/cuda.py index aa863b57..4e9decd3 100644 --- a/tools/Polygraphy/polygraphy/cuda/cuda.py +++ b/tools/Polygraphy/polygraphy/cuda/cuda.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,6 @@ # limitations under the License. # import ctypes -import time import os import sys diff --git a/tools/Polygraphy/polygraphy/exception/exception.py b/tools/Polygraphy/polygraphy/exception/exception.py index 1269168b..41662009 100644 --- a/tools/Polygraphy/polygraphy/exception/exception.py +++ b/tools/Polygraphy/polygraphy/exception/exception.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/func/func.py b/tools/Polygraphy/polygraphy/func/func.py index 1b4fc3d2..dbf111ab 100644 --- a/tools/Polygraphy/polygraphy/func/func.py +++ b/tools/Polygraphy/polygraphy/func/func.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/json/serde.py b/tools/Polygraphy/polygraphy/json/serde.py index e181c6b2..fd6ac39d 100644 --- a/tools/Polygraphy/polygraphy/json/serde.py +++ b/tools/Polygraphy/polygraphy/json/serde.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/logger/logger.py b/tools/Polygraphy/polygraphy/logger/logger.py index 8a2cf2cf..5594dca3 100644 --- a/tools/Polygraphy/polygraphy/logger/logger.py +++ b/tools/Polygraphy/polygraphy/logger/logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ # import copy import enum -import inspect +import functools import os import sys import time @@ -633,6 +633,7 @@ def log_exception(self, func): still be logged. 
""" + @functools.wraps(func) def wrapped(*args, **kwargs): from polygraphy.exception import PolygraphyException diff --git a/tools/Polygraphy/polygraphy/mod/exporter.py b/tools/Polygraphy/polygraphy/mod/exporter.py index 6cf52049..cc592fc1 100644 --- a/tools/Polygraphy/polygraphy/mod/exporter.py +++ b/tools/Polygraphy/polygraphy/mod/exporter.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/mod/importer.py b/tools/Polygraphy/polygraphy/mod/importer.py index 1043530e..f018398e 100644 --- a/tools/Polygraphy/polygraphy/mod/importer.py +++ b/tools/Polygraphy/polygraphy/mod/importer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/mod/util.py b/tools/Polygraphy/polygraphy/mod/util.py index f3e01dd8..82a10b85 100644 --- a/tools/Polygraphy/polygraphy/mod/util.py +++ b/tools/Polygraphy/polygraphy/mod/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,4 +17,21 @@ def version(version_str): - return tuple([int(num) for num in version_str.split(".")]) + def process_version_part(num): + try: + return [int(num)] + except ValueError: + VERSION_SUFFIXES = ["a", "b", "rc", "post", "dev"] + # One version part can only contain one of the above suffixes + for suffix in VERSION_SUFFIXES: + if suffix in num: + return num.partition(suffix) + + # For unrecognized suffixes, just return as-is + return [num] + + ver_list = [] + for num in version_str.split("."): + ver_list.extend(process_version_part(num)) + + return tuple(ver_list) diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/onnx/loader.py b/tools/Polygraphy/polygraphy/tools/args/backend/onnx/loader.py index c4d0dbed..032e8d0c 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/onnx/loader.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/onnx/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -409,6 +409,7 @@ def __init__( outputs_opt_prefix: str = None, allow_shape_inference: bool = None, allow_from_tf: bool = None, + allow_setting_upper_bounds: bool = None, ): """ Args: @@ -425,12 +426,16 @@ def __init__( allow_from_tf (bool): Whether to allow conversion of TensorFlow models to ONNX. Defaults to False. + allow_setting_upper_bounds (bool): + Whether to allow setting upper bounds for unbounded DDS. + Defaults to False. 
""" super().__init__() self._allow_saving = util.default(allow_saving, False) self._allow_shape_inference = util.default(allow_shape_inference, True) self._outputs_opt_prefix = util.default(outputs_opt_prefix, "onnx-") self._allow_from_tf = util.default(allow_from_tf, False) + self._allow_setting_upper_bounds = util.default(allow_setting_upper_bounds, False) def add_parser_args_impl(self): self.group.add_argument( @@ -468,6 +473,36 @@ def add_parser_args_impl(self): dest="onnx_exclude_outputs", ) + self.group.add_argument( + "--fp-to-fp16", + help="Convert all floating point tensors in an ONNX model to 16-bit precision. " + "This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends. " + "Requires onnxmltools. ", + action="store_true", + default=None, + ) + + if self._allow_setting_upper_bounds: + self.group.add_argument( + "--set-unbounded-dds-upper-bound", + help=""" + Set upper bounds for tensors with unbounded DDS(data-dependent shape). + Tensors with unbounded DDS can make it difficult for TensorRT to optimize inference performance + and memory usage. In the worst case, they can cause TensorRT engine build failures. To fix this, + Polygraphy supports setting upper bounds for tensors with unbounded DDS by inserting the ONNX + min operator. To specify per-tensor upper bounds, use the format: + --set-unbounded-dds-upper-bound [:]. + If no tensor name is provided, the upper bound is used for any tensors with unbounded DDS that + are not explicitly specified. For example: + --set-unbounded-dds-upper-bound 10000 tensor_a:5000 tensor_b:4000. + + Note that setting upper bounds only works for models that have been constant folded and have shapes inferred. + """, + nargs="+", + default=None, + dest="upper_bounds" + ) + def parse_impl(self, args): """ Parses command-line arguments and populates the following attributes: @@ -477,11 +512,15 @@ def parse_impl(self, args): exclude_outputs (List[str]): Names of tensors which should be unmarked as outputs. external_data_dir (str): Path to a directory from which to load external data. ignore_external_data (bool): Whether to ignore loading external data. + convert_to_fp16 (bool): Whether to convert the model to FP16. + upper_bounds (Union[int, Dict[str, int]]): The upper bounds for tensors with unbounded DDS. 
""" self.outputs = args_util.get_outputs(args, "onnx_outputs") self.exclude_outputs = args_util.get(args, "onnx_exclude_outputs") self.external_data_dir = args_util.get(args, "external_data_dir") self.ignore_external_data = args_util.get(args, "ignore_external_data") + self.convert_to_fp16 = args_util.get(args, "fp_to_fp16") + self.upper_bounds = args_util.parse_arglist_to_dict(args_util.get(args, "upper_bounds")) def _add_modify_onnx_outputs(self, script, loader_name, disable_custom_outputs: bool = None): if disable_custom_outputs: @@ -540,6 +579,10 @@ def add_to_script_impl(self, script, disable_custom_outputs: bool = None, serial loader_name = self._add_modify_onnx_outputs(script, loader_name, disable_custom_outputs=disable_custom_outputs) + if self.convert_to_fp16: + script.add_import(imports=["ConvertToFp16"], frm="polygraphy.backend.onnx") + loader_name = script.add_loader(make_invocable("ConvertToFp16", loader_name), "convert_to_fp16") + if self._allow_saving: loader_name = self.arg_groups[OnnxSaveArgs].add_to_script(script, loader_name) @@ -547,6 +590,10 @@ def add_to_script_impl(self, script, disable_custom_outputs: bool = None, serial script.add_import(imports=["BytesFromOnnx"], frm="polygraphy.backend.onnx") loader_name = script.add_loader(make_invocable("BytesFromOnnx", loader_name), "serialize_onnx") + if self._allow_setting_upper_bounds and self.upper_bounds is not None: + script.add_import(imports=["SetUpperBound"], frm="polygraphy.backend.onnx") + loader_name = script.add_loader(make_invocable("SetUpperBound", loader_name, upper_bounds=self.upper_bounds), "set_upper_bound") + return loader_name def must_use_onnx_loader(self, disable_custom_outputs: bool = None): @@ -566,6 +613,8 @@ def must_use_onnx_loader(self, disable_custom_outputs: bool = None): needs_modify = self._add_modify_onnx_outputs(tmp_script, inp_loader, disable_custom_outputs) != inp_loader needs_shape_inference = self._allow_shape_inference and self.arg_groups[OnnxInferShapesArgs].do_shape_inference needs_save = self._allow_saving and self.arg_groups[OnnxSaveArgs].path is not None + needs_fp16_conversion = self.convert_to_fp16 + needs_setting_upper_bounds = self._allow_setting_upper_bounds and self.upper_bounds is not None # Currently, other loaders do not support external data, so we must fall back to the ONNX loader if it's present. return ( not self.arg_groups[ModelArgs].model_type.is_onnx() @@ -573,6 +622,8 @@ def must_use_onnx_loader(self, disable_custom_outputs: bool = None): or self.external_data_dir or needs_shape_inference or needs_save + or needs_fp16_conversion + or needs_setting_upper_bounds ) def load_onnx(self): diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/loader.py b/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/loader.py index d5a71ed7..ce0d4251 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/loader.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/runner.py b/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/runner.py index f86b90c1..fd9f93f6 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/runner.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/onnxrt/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/pluginref/runner.py b/tools/Polygraphy/polygraphy/tools/args/backend/pluginref/runner.py index 7e58ba00..2d5b5078 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/pluginref/runner.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/pluginref/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/runner_select.py b/tools/Polygraphy/polygraphy/tools/args/backend/runner_select.py index d9be02ff..8d3fba18 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/runner_select.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/runner_select.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/tf/config.py b/tools/Polygraphy/polygraphy/tools/args/backend/tf/config.py index 4ae9dbeb..cfee0a8d 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/tf/config.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/tf/config.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/tf/loader.py b/tools/Polygraphy/polygraphy/tools/args/backend/tf/loader.py index d1a46c53..75c8ec1f 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/tf/loader.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/tf/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -101,7 +101,7 @@ class TfLoadArgs(BaseArgs): - ModelArgs - TfTrtArgs: if allow_tftrt == True - - TrtSaveEngineArgs: if allow_tftrt == True + - TrtSaveEngineBytesArgs: if allow_tftrt == True """ def __init__(self, allow_artifacts: bool = None, allow_custom_outputs: bool = None, allow_tftrt: bool = None): @@ -216,10 +216,10 @@ def add_to_script_impl(self, script, disable_custom_outputs=None): engine_dir = None if self._allow_tftrt: - from polygraphy.tools.args.backend.trt import TrtSaveEngineArgs + from polygraphy.tools.args.backend.trt import TrtSaveEngineBytesArgs loader_name = self.arg_groups[TfTrtArgs].add_to_script(script, loader_name) - engine_dir = self.arg_groups[TrtSaveEngineArgs].path + engine_dir = self.arg_groups[TrtSaveEngineBytesArgs].path MODIFY_TF = "ModifyGraphOutputs" outputs = None if disable_custom_outputs else args_util.get_outputs_for_script(script, self.outputs) diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/tf/runner.py b/tools/Polygraphy/polygraphy/tools/args/backend/tf/runner.py index c4930cfe..1c9ea197 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/tf/runner.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/tf/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/trt/config.py b/tools/Polygraphy/polygraphy/tools/args/backend/trt/config.py index 619b6daf..6b75bb59 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/trt/config.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/trt/config.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,10 +21,11 @@ from polygraphy.common import TensorMetadata from polygraphy.logger import G_LOGGER, LogMode from polygraphy.tools.args import util as args_util +from polygraphy.tools.args.backend.trt.helper import make_trt_enum_val from polygraphy.tools.args.base import BaseArgs from polygraphy.tools.args.comparator.data_loader import DataLoaderArgs from polygraphy.tools.args.model import ModelArgs -from polygraphy.tools.script import inline_identifier, inline, make_invocable, make_invocable_if_nondefault, safe +from polygraphy.tools.script import inline, inline_identifier, make_invocable, make_invocable_if_nondefault, safe def parse_profile_shapes(default_shapes, min_args, opt_args, max_args): @@ -192,6 +193,18 @@ def add_parser_args_impl(self): action="store_true", default=None, ) + self.group.add_argument( + "--version-compatible", + help="Builds an engine designed to be forward TensorRT version compatible.", + action="store_true", + default=None, + ) + self.group.add_argument( + "--exclude-lean-runtime", + help="Exclude the lean runtime from the plan when version compatibility is enabled. 
", + action="store_true", + default=None, + ) self.group.add_argument( "--workspace", @@ -335,7 +348,7 @@ def add_parser_args_impl(self): dest="preview_features", help="Preview features to enable. Values come from the names of the values " "in the trt.PreviewFeature enum, and are case-insensitive." - "If no arguments are provided, e.g. '--preview-features', then all preview features are disabled." + "If no arguments are provided, e.g. '--preview-features', then all preview features are disabled. " "Defaults to TensorRT's default preview features.", nargs="*", default=None, @@ -343,19 +356,31 @@ def add_parser_args_impl(self): self.group.add_argument( "--builder-optimization-level", - help="The builder optimization level. Setting a higher optimization" - "level allows the optimizer to spend more time searching for optimization opportunities." - "The resulting engine may have better performance compared to an engine built with a lower optimization level." - "Refer to the TensorRT API documentation for details.", + help="The builder optimization level. Setting a higher optimization " + "level allows the optimizer to spend more time searching for optimization opportunities. " + "The resulting engine may have better performance compared to an engine built with a lower optimization level. " + "Refer to the TensorRT API documentation for details. ", type=int, default=None, ) self.group.add_argument( "--hardware-compatibility-level", - help="The hardware compatibility level to use for the engine. This allows engines built on one GPU architecture to work on GPUs" + help="The hardware compatibility level to use for the engine. This allows engines built on one GPU architecture to work on GPUs " "of other architectures. Values come from the names of values in the `trt.HardwareCompatibilityLevel` enum and are case-insensitive. " - "For example, `--hardware-compatibility-level ampere_plus`", + "For example, `--hardware-compatibility-level ampere_plus` ", + default=None, + ) + + self.group.add_argument( + "--max-aux-streams", + help="The maximum number of auxiliary streams that TensorRT is allowed to use. If the network contains " + "operators that can run in parallel, TRT can execute them using auxiliary streams in addition to the one " + "provided to the IExecutionContext.execute_async_v3() call. " + "The default maximum number of auxiliary streams is determined by the heuristics in TensorRT on " + "whether enabling multi-stream would improve the performance. " + "Refer to the TensorRT API documentation for details.", + type=int, default=None, ) @@ -363,14 +388,14 @@ def add_parser_args_impl(self): self.group.add_argument( "--engine-capability", help="The desired engine capability. " - "Possible values come from the names of the values in the trt.EngineCapability enum and are case-insensitive.", + "Possible values come from the names of the values in the trt.EngineCapability enum and are case-insensitive. ", default=None, ) if self._allow_tensor_formats: self.group.add_argument( "--direct-io", - help="Disallow reformatting layers at network input/output tensors which have user-specified formats.", + help="Disallow reformatting layers at network input/output tensors which have user-specified formats. ", action="store_true", default=None, ) @@ -409,11 +434,11 @@ def parse_impl(self, args): refittable (bool): Whether the engine should be refittable. builder_optimization_level (int): The builder optimization level. 
hardware_compatibility_level (str): A string representing a hardware compatibility level enum value. + max_aux_streams (int): The maximum number of auxiliary streams that TensorRT is allowed to use. + version_compatible (bool): Whether or not to build a TensorRT forward-compatible. + exclude_lean_runtime (bool): Whether to exclude the lean runtime from a version compatible plan. """ - def make_enum_val(enum_name, value): - return inline(safe("trt.{:}.{:}", inline_identifier(enum_name), inline_identifier(value.upper()))) - trt_min_shapes = args_util.get(args, "trt_min_shapes", default=[]) trt_max_shapes = args_util.get(args, "trt_max_shapes", default=[]) trt_opt_shapes = args_util.get(args, "trt_opt_shapes", default=[]) @@ -466,7 +491,7 @@ def make_enum_val(enum_name, value): tactic_sources = args_util.get(args, "tactic_sources") self.tactic_sources = None if tactic_sources is not None: - self.tactic_sources = [make_enum_val("TacticSource", source) for source in tactic_sources] + self.tactic_sources = [make_trt_enum_val("TacticSource", source) for source in tactic_sources] self.trt_config_script, self.trt_config_func_name = args_util.parse_script_and_func_name( args_util.get(args, "trt_config_script"), default_func_name="load_config" @@ -503,19 +528,19 @@ def make_enum_val(enum_name, value): self.memory_pool_limits = None if memory_pool_limits is not None: self.memory_pool_limits = { - make_enum_val("MemoryPoolType", pool_type): pool_size + make_trt_enum_val("MemoryPoolType", pool_type): pool_size for pool_type, pool_size in memory_pool_limits.items() } preview_features = args_util.get(args, "preview_features") self.preview_features = None if preview_features is not None: - self.preview_features = [make_enum_val("PreviewFeature", feature) for feature in preview_features] + self.preview_features = [make_trt_enum_val("PreviewFeature", feature) for feature in preview_features] engine_capability = args_util.get(args, "engine_capability") self.engine_capability = None if engine_capability is not None: - self.engine_capability = make_enum_val("EngineCapability", engine_capability) + self.engine_capability = make_trt_enum_val("EngineCapability", engine_capability) self.direct_io = args_util.get(args, "direct_io") self.builder_optimization_level = args_util.get(args, "builder_optimization_level") @@ -523,10 +548,17 @@ def make_enum_val(enum_name, value): self.hardware_compatibility_level = None hardware_compatibility_level = args_util.get(args, "hardware_compatibility_level") if hardware_compatibility_level is not None: - self.hardware_compatibility_level = make_enum_val( + self.hardware_compatibility_level = make_trt_enum_val( "HardwareCompatibilityLevel", hardware_compatibility_level ) + self.max_aux_streams = args_util.get(args, "max_aux_streams") + self.version_compatible = args_util.get(args, "version_compatible") + self.exclude_lean_runtime = args_util.get(args, "exclude_lean_runtime") + + if self.exclude_lean_runtime and not self.version_compatible: + G_LOGGER.critical(f"`--exclude-lean-runtime` requires `--version-compatible` to be enabled.") + def add_to_script_impl(self, script): profiles = [] for profile_dict in self.profile_dicts: @@ -630,6 +662,9 @@ def add_to_script_impl(self, script): direct_io=self.direct_io, builder_optimization_level=self.builder_optimization_level, hardware_compatibility_level=self.hardware_compatibility_level, + max_aux_streams=self.max_aux_streams, + version_compatible=self.version_compatible, + exclude_lean_runtime=self.exclude_lean_runtime, ) if 
config_loader_str is not None: script.add_import(imports="CreateConfig", frm="polygraphy.backend.trt", imp_as="CreateTrtConfig") diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/trt/helper.py b/tools/Polygraphy/polygraphy/tools/args/backend/trt/helper.py new file mode 100644 index 00000000..8ed8f64a --- /dev/null +++ b/tools/Polygraphy/polygraphy/tools/args/backend/trt/helper.py @@ -0,0 +1,28 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# This file would have been called `util.py` but if we do that, then for some reason Python thinks +# that this is the file we want when importing `polygraphy.tools.args.util`. +from polygraphy.tools.script import inline, inline_identifier, safe + + +def make_trt_enum_val(enum_name, value): + """ + Helper function to create inline TRT enums for usage across various TRT classes. + """ + return inline(safe("trt.{:}.{:}", inline_identifier(enum_name), inline_identifier(value.upper()))) diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/trt/loader.py b/tools/Polygraphy/polygraphy/tools/args/backend/trt/loader.py index 995a236c..b1a5c613 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/trt/loader.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/trt/loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,7 @@ # limitations under the License. # +import copy import os from polygraphy import mod, util @@ -22,9 +23,10 @@ from polygraphy.tools.args import util as args_util from polygraphy.tools.args.backend.onnx.loader import OnnxLoadArgs from polygraphy.tools.args.backend.trt.config import TrtConfigArgs +from polygraphy.tools.args.backend.trt.helper import make_trt_enum_val from polygraphy.tools.args.base import BaseArgs from polygraphy.tools.args.model import ModelArgs -from polygraphy.tools.script import inline_identifier, inline, make_invocable, make_invocable_if_nondefault_kwargs, safe +from polygraphy.tools.script import inline, inline_identifier, make_invocable, make_invocable_if_nondefault_kwargs, safe @mod.export() @@ -59,6 +61,58 @@ def add_to_script_impl(self, script, loader_name: str): return loader_name +@mod.export() +class TrtOnnxFlagArgs(BaseArgs): + """ + ONNX-TRT Parser Flags: setting flags for TensorRT's ONNX parser + + Depends on: + + - TrtConfigArgs: If NATIVE_INSTANCENORM should be automatically enabled in VC/HC mode + """ + + def add_parser_args_impl(self): + self.group.add_argument( + "--onnx-flags", + help="Flag(s) for adjusting the default parsing behavior of the ONNX parser. " + "Flag values come from the `trt.OnnxParserFlag` enum and are case-insensitive. " 
+ "For example: --onnx-flags native_instancenorm ", + nargs="+", + default=None, + ) + + def parse_impl(self, args): + """ + Parses command-line arguments and populates the following attributes: + + Attributes: + flags (List[str]): flags for onnxparser + """ + self._flags = args_util.get(args, "onnx_flags", default=[]) + + def get_flags(self): + """ + Updates and returns the ONNX parser flags as necessary. + This must be called only in `add_to_script_impl`. + Flags should not be accessed directly. + """ + flags = copy.copy(self._flags) or [] + if ( + TrtConfigArgs in self.arg_groups + and ( + self.arg_groups[TrtConfigArgs].hardware_compatibility_level is not None + or self.arg_groups[TrtConfigArgs].version_compatible + ) + and "native_instancenorm" not in [f.lower() for f in flags] + ): + G_LOGGER.warning( + f"Version or hardware compatibility mode is enabled. Automatically enabling `NATIVE_INSTANCENORM` ONNX parser flag." + ) + flags.append("native_instancenorm") + + return [make_trt_enum_val("OnnxParserFlag", f) for f in flags] or None + + @mod.export() class TrtLoadNetworkArgs(BaseArgs): """ @@ -69,6 +123,7 @@ class TrtLoadNetworkArgs(BaseArgs): - ModelArgs - TrtLoadPluginsArgs - OnnxLoadArgs: if allow_onnx_loading == True + - TrtOnnxFlagArgs """ def __init__( @@ -226,8 +281,11 @@ def add_to_script_impl(self, script): model_file = self.arg_groups[ModelArgs].path model_type = self.arg_groups[ModelArgs].model_type outputs = args_util.get_outputs_for_script(script, self.outputs) + parser_flags = self.arg_groups[TrtOnnxFlagArgs].get_flags() - if any(arg is not None for arg in [self.layer_precisions, self.tensor_datatypes, self.tensor_formats]): + if any( + arg is not None for arg in [self.layer_precisions, self.tensor_datatypes, self.tensor_formats, parser_flags] + ): script.add_import(imports="tensorrt", imp_as="trt") if model_type == "trt-network-script": @@ -247,13 +305,17 @@ def add_to_script_impl(self, script): script, disable_custom_outputs=True, serialize_model=True ) loader_str = make_invocable( - "NetworkFromOnnxBytes", self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, onnx_loader) + "NetworkFromOnnxBytes", + self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, onnx_loader), + flags=parser_flags, ) loader_name = script.add_loader(loader_str, "parse_network_from_onnx") else: script.add_import(imports=["NetworkFromOnnxPath"], frm="polygraphy.backend.trt") loader_str = make_invocable( - "NetworkFromOnnxPath", self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, model_file) + "NetworkFromOnnxPath", + self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, model_file), + flags=parser_flags, ) loader_name = script.add_loader(loader_str, "parse_network_from_onnx") else: @@ -300,9 +362,12 @@ def load_network(self): @mod.export() -class TrtSaveEngineArgs(BaseArgs): +class TrtSaveEngineBytesArgs(BaseArgs): """ TensorRT Engine Saving: saving TensorRT engines. + + Saves a serialized engine. This should be preferred over `TrtSaveEngineArgs()` since as of TensorRT 8.6, + version compatible engines cannot be re-serialized after they have been initially deserialized. """ def __init__(self, output_opt: str = None, output_short_opt: str = None): @@ -339,23 +404,76 @@ def add_to_script_impl(self, script, loader_name): """ Args: loader_name (str): - The name of the loader which should be consumed by the ``SaveEngine`` loader. + The name of the loader which will generate the serialized engine. Returns: - str: The name of the ``SaveEngine`` loader added to the script. 
+ str: The name of the loader added to the script. """ if self.path is None: return loader_name - script.add_import(imports=["SaveEngine"], frm="polygraphy.backend.trt") - return script.add_loader(make_invocable("SaveEngine", loader_name, path=self.path), "save_engine") + script.add_import(imports=["SaveBytes"], frm="polygraphy.backend.common") + return script.add_loader(make_invocable("SaveBytes", loader_name, path=self.path), "save_engine_bytes") + + def save_engine_bytes(self, engine_bytes, path=None): + """ + Saves a serialized TensorRT engine according to arguments provided on the command-line. + + Args: + engine_bytes (bytes): The serialized TensorRT engine to save. + + path (str): + The path at which to save the engine. + If no path is provided, it is determined from command-line arguments. + + Returns: + bytes: The serialized engine that was saved. + """ + with util.TempAttrChange(self, {"path": path}): + loader = args_util.run_script(self.add_to_script, engine_bytes) + return loader() + + +@mod.deprecate(remove_in="0.55.0", use_instead="TrtSaveEngineBytesArgs") +@mod.export() +class TrtSaveEngineArgs(BaseArgs): + """ + TensorRT Engine Saving: saving TensorRT engines. + + Depends on: + + - TrtSaveEngineBytesArgs + """ + + # For backwards-compatibility + @property + def path(self): + return self.arg_groups[TrtSaveEngineBytesArgs].path + + def add_to_script_impl(self, script, loader_name): + """ + Args: + loader_name (str): + The name of the loader which will generate the engine. + + Returns: + str: The name of the loader added to the script. + """ + path = self.arg_groups[TrtSaveEngineBytesArgs].path + + if path is None: + return loader_name + + script.add_import(imports=["BytesFromEngine"], frm="polygraphy.backend.trt") + loader_name = script.add_loader(make_invocable("BytesFromEngine", loader_name, path=path), "bytes_from_engine") + return self.arg_groups[TrtSaveEngineBytesArgs].add_to_script(script, loader_name) def save_engine(self, engine, path=None): """ Saves a TensorRT engine according to arguments provided on the command-line. Args: - model (onnx.ModelProto): The TensorRT engine to save. + engine (trt.ICudaEngine): The TensorRT engine to save. path (str): The path at which to save the engine. @@ -370,9 +488,9 @@ def save_engine(self, engine, path=None): @mod.export() -class TrtLoadEngineArgs(BaseArgs): +class TrtLoadEngineBytesArgs(BaseArgs): """ - TensorRT Engine: loading TensorRT engines. + TensorRT Engine: loading or building TensorRT engines. Depends on: @@ -380,7 +498,7 @@ class TrtLoadEngineArgs(BaseArgs): - TrtLoadPluginsArgs - TrtLoadNetworkArgs: if support for building engines is required - TrtConfigArgs: if support for building engines is required - - TrtSaveEngineArgs: if allow_saving == True + - TrtSaveEngineBytesArgs: if allow_saving == True """ def __init__(self, allow_saving: bool = None): @@ -416,27 +534,22 @@ def add_to_script_impl(self, script, network_name=None): network_name (str): The name of a variable in the script pointing to a network loader. 
""" if self.arg_groups[ModelArgs].model_type == "engine": - script.add_import(imports=["EngineFromBytes"], frm="polygraphy.backend.trt") script.add_import(imports=["BytesFromPath"], frm="polygraphy.backend.common") - load_engine = script.add_loader( - make_invocable("BytesFromPath", self.arg_groups[ModelArgs].path), "load_engine_bytes" - ) return script.add_loader( - make_invocable( - "EngineFromBytes", self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, load_engine) - ), - "deserialize_engine", + make_invocable("BytesFromPath", self.arg_groups[ModelArgs].path), "load_engine_bytes" ) network_loader_name = network_name if network_loader_name is None: network_loader_name = self.arg_groups[TrtLoadNetworkArgs].add_to_script(script) - script.add_import(imports=["EngineFromNetwork"], frm="polygraphy.backend.trt") + script.add_import(imports=["EngineBytesFromNetwork"], frm="polygraphy.backend.trt") config_loader_name = self.arg_groups[TrtConfigArgs].add_to_script(script) + + script.add_import(imports=["EngineBytesFromNetwork"], frm="polygraphy.backend.trt") loader_str = make_invocable( - "EngineFromNetwork", + "EngineBytesFromNetwork", self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, network_loader_name), config=config_loader_name, save_timing_cache=self.save_timing_cache, @@ -444,9 +557,82 @@ def add_to_script_impl(self, script, network_name=None): loader_name = script.add_loader(loader_str, "build_engine") if self._allow_saving: - loader_name = self.arg_groups[TrtSaveEngineArgs].add_to_script(script, loader_name) + loader_name = self.arg_groups[TrtSaveEngineBytesArgs].add_to_script(script, loader_name) return loader_name + def load_engine_bytes(self, network=None): + """ + Loads a TensorRT engine according to arguments provided on the command-line. + + Args: + network (Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]]): + A tuple containing a TensorRT builder, network and optionally parser. + + Returns: + tensorrt.ICudaEngine: The engine. + """ + loader = args_util.run_script(self.add_to_script, network) + return loader() + + +@mod.export() +class TrtLoadEngineArgs(BaseArgs): + """ + TensorRT Engine: loading TensorRT engines. + + Depends on: + + - TrtLoadEngineBytesArgs + - TrtLoadPluginsArgs + """ + + # For backwards-compatibility + @property + def save_timing_cache(self): + return self.arg_groups[TrtLoadEngineBytesArgs].save_timing_cache + + def add_parser_args_impl(self): + self.group.add_argument( + "--load-runtime", + help="Path from which to load a runtime that can be used to load a version compatible " + "engine that excludes the lean runtime. ", + default=None, + ) + + def parse_impl(self, args): + """ + Parses command-line arguments and populates the following attributes: + + Attributes: + load_runtime (str): + Path rom which to load a runtime that can be used to load a + version compatible engine that excludes the lean runtime. + """ + self.load_runtime = args_util.parse_path(args_util.get(args, "load_runtime"), "Runtime") + + def add_to_script_impl(self, script, network_name=None): + """ + Args: + network_name (str): The name of a variable in the script pointing to a network loader. 
+ """ + load_serialized_engine = self.arg_groups[TrtLoadEngineBytesArgs].add_to_script(script, network_name) + + script.add_import(imports=["EngineFromBytes"], frm="polygraphy.backend.trt") + + runtime_loader = None + if self.load_runtime is not None: + script.add_import(imports=["LoadRuntime"], frm="polygraphy.backend.trt") + runtime_loader = script.add_loader(make_invocable("LoadRuntime", self.load_runtime), "load_runtime") + + return script.add_loader( + make_invocable( + "EngineFromBytes", + self.arg_groups[TrtLoadPluginsArgs].add_to_script(script, load_serialized_engine), + runtime=runtime_loader, + ), + "deserialize_engine", + ) + def load_engine(self, network=None): """ Loads a TensorRT engine according to arguments provided on the command-line. diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/trt/runner.py b/tools/Polygraphy/polygraphy/tools/args/backend/trt/runner.py index e83f707d..7f5632f9 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/trt/runner.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/trt/runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,9 +16,8 @@ # from polygraphy import mod from polygraphy.tools.args import util as args_util -from polygraphy.tools.args.base import BaseRunnerArgs -from polygraphy.tools.args.model import ModelArgs from polygraphy.tools.args.backend.trt.loader import TrtLoadEngineArgs +from polygraphy.tools.args.base import BaseRunnerArgs from polygraphy.tools.script import make_invocable diff --git a/tools/Polygraphy/polygraphy/tools/args/backend/trt_legacy.py b/tools/Polygraphy/polygraphy/tools/args/backend/trt_legacy.py index 4d709546..067fb362 100644 --- a/tools/Polygraphy/polygraphy/tools/args/backend/trt_legacy.py +++ b/tools/Polygraphy/polygraphy/tools/args/backend/trt_legacy.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ from polygraphy.tools.args import util as args_util from polygraphy.tools.args.backend.onnx.loader import OnnxLoadArgs from polygraphy.tools.args.backend.trt.config import TrtConfigArgs -from polygraphy.tools.args.backend.trt.loader import TrtLoadPluginsArgs, TrtSaveEngineArgs +from polygraphy.tools.args.backend.trt.loader import TrtLoadPluginsArgs, TrtSaveEngineBytesArgs from polygraphy.tools.args.base import BaseRunnerArgs from polygraphy.tools.args.comparator.data_loader import DataLoaderArgs from polygraphy.tools.args.model import ModelArgs @@ -38,7 +38,7 @@ class TrtLegacyRunnerArgs(BaseRunnerArgs): - TrtLoadPluginsArgs - TrtConfigArgs - TfLoadArgs - - TrtSaveEngineArgs + - TrtSaveEngineBytesArgs - DataLoaderArgs - OnnxLoadArgs @@ -119,7 +119,7 @@ def add_to_script_impl(self, script): script.add_import(imports=["LoadNetworkFromUff"], frm="polygraphy.backend.trt_legacy") if self.arg_groups[ModelArgs].model_type == "uff": script.add_import(imports=["LoadUffFile"], frm="polygraphy.backend.trt_legacy") - shapes = {name: shape for name, (_, shape) in self.arg_groups[ModelArgs].input_shapes.items()} + shapes = {name: tuple(shape) for name, (_, shape) in self.arg_groups[ModelArgs].input_shapes.items()} loader_name = script.add_loader( make_invocable( "LoadUffFile", self.arg_groups[ModelArgs].path, util.default(shapes, {}), self.trt_outputs @@ -170,7 +170,7 @@ def add_to_script_impl(self, script): fp16=self.arg_groups[TrtConfigArgs].fp16, tf32=self.arg_groups[TrtConfigArgs].tf32, load_engine=load_engine, - save_engine=self.arg_groups[TrtSaveEngineArgs].path, + save_engine=self.arg_groups[TrtSaveEngineBytesArgs].path, layerwise=self.trt_outputs == constants.MARK_ALL, plugins=self.arg_groups[TrtLoadPluginsArgs].plugins, int8=self.arg_groups[TrtConfigArgs].int8, diff --git a/tools/Polygraphy/polygraphy/tools/args/base.py b/tools/Polygraphy/polygraphy/tools/args/base.py index ed95492d..ca274f00 100644 --- a/tools/Polygraphy/polygraphy/tools/args/base.py +++ b/tools/Polygraphy/polygraphy/tools/args/base.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,7 +45,6 @@ class BaseArgs: - OtherArgs0 - OtherArgs1: - - OtherArgs2: [Optional] @@ -60,7 +59,7 @@ class BaseArgs: - TrtLoadPluginsArgs - TrtLoadNetworkArgs: if building engines - TrtConfigArgs: if building engines - - TrtSaveEngineArgs: if allow_saving == True + - TrtSaveEngineBytesArgs: if allow_saving == True The section header and description will be used to popluate the tool's help output. """ @@ -107,6 +106,8 @@ def add_parser_args(self, parser): """ Add arguments to a command-line parser. + This method is guaranteed to only be called after `register`. + Args: parser (argparse.ArgumentParser): The argument parser. """ @@ -118,15 +119,19 @@ def add_parser_args(self, parser): "See BaseArgs documentation for details." ) + num_prev_actions = len(parser._actions) + self.group = parser.add_argument_group(title.strip(), f"Options related to {desc.strip()}") self.add_parser_args_impl() + num_added_actions = len(parser._actions) - num_prev_actions # Remove empty groups from the parser. 
if self.group._action_groups: G_LOGGER.internal_error("Argument groups should not create subgroups!") - if not self.group._actions: + # Remove empty groups from help text + if not num_added_actions: parser._action_groups.remove(self.group) self.group = None @@ -139,6 +144,8 @@ def parse(self, args): Parses relevant arguments from command-line arguments and populates corresponding attributes of this argument group. + This method is guaranteed to only be called after `add_parser_args`. + Args: args: Arguments provided by argparse. """ @@ -154,6 +161,8 @@ def add_to_script(self, script, *args, **kwargs) -> str: For example, ``TrtConfigArgs`` would add a call to ``CreateConfig``. + This method is guaranteed to only be called after `parse`. + Args: script (polygraphy.tools.script.Script): A script to which code should be added. diff --git a/tools/Polygraphy/polygraphy/tools/args/comparator/comparator.py b/tools/Polygraphy/polygraphy/tools/args/comparator/comparator.py index 619b672a..d5528e70 100644 --- a/tools/Polygraphy/polygraphy/tools/args/comparator/comparator.py +++ b/tools/Polygraphy/polygraphy/tools/args/comparator/comparator.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import copy + from polygraphy import constants, mod, util from polygraphy.logger import G_LOGGER from polygraphy.tools.args import util as args_util @@ -129,6 +131,11 @@ def __init__(self, allow_postprocessing: bool = None): self._allow_postprocessing = util.default(allow_postprocessing, True) def add_parser_args_impl(self): + self._comparison_func_map = { + "simple": self.arg_groups[CompareFuncSimpleArgs], + "indices": self.arg_groups[CompareFuncIndicesArgs], + } + self.group.add_argument("--validate", help="Check outputs for NaNs and Infs", action="store_true", default=None) self.group.add_argument( "--fail-fast", help="Fail fast (stop comparing after the first failure)", action="store_true", default=None @@ -139,7 +146,7 @@ def add_parser_args_impl(self): "--compare-func", help="Name of the function to use to perform comparison. See the API documentation for `CompareFunc` for details. " "Defaults to 'simple'. ", - choices=["simple", "indices"], + choices=list(self._comparison_func_map.keys()), default="simple", dest="compare", ) @@ -181,6 +188,17 @@ def parse_impl(self, args): self.compare_func = args_util.get(args, "compare") + # Show warnings for any options provided for unselected comparison functions + unselected_comparison_funcs = copy.copy(self._comparison_func_map) + del unselected_comparison_funcs[self.compare_func] + for name, arg_group in unselected_comparison_funcs.items(): + for action in arg_group.group._group_actions: + if args_util.get(args, action.dest) is not None: + G_LOGGER.warning( + f"Option: {'/'.join(action.option_strings)} is only valid for comparison function: '{name}'. " + f"The selected comparison function is: '{self.compare_func}', so this option will be ignored." 
+ ) + self.compare_func_script, self.compare_func_name = args_util.parse_script_and_func_name( args_util.get(args, "compare_func_script"), default_func_name="compare_outputs" ) @@ -221,10 +239,7 @@ def add_to_script_impl(self, script, results_name): script.add_import(imports=["InvokeFromScript"], frm="polygraphy.backend.common") compare_func = make_invocable("InvokeFromScript", self.compare_func_script, name=self.compare_func_name) else: - compare_func = { - "simple": self.arg_groups[CompareFuncSimpleArgs], - "indices": self.arg_groups[CompareFuncIndicesArgs], - }[self.compare_func].add_to_script(script) + compare_func = self._comparison_func_map[self.compare_func].add_to_script(script) compare_accuracy = make_invocable( "Comparator.compare_accuracy", results_name, compare_func=compare_func, fail_fast=self.fail_fast diff --git a/tools/Polygraphy/polygraphy/tools/args/comparator/compare.py b/tools/Polygraphy/polygraphy/tools/args/comparator/compare.py index 4d4ab83f..8d998dbd 100644 --- a/tools/Polygraphy/polygraphy/tools/args/comparator/compare.py +++ b/tools/Polygraphy/polygraphy/tools/args/comparator/compare.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,12 @@ from polygraphy.tools.args.base import BaseArgs from polygraphy.tools.script import inline, make_invocable, make_invocable_if_nondefault, safe +# +# NOTE: The classes here are expected to use `None` as the default value for all arguments. +# This is because `ComparatorCompareArgs` will display warnings for any non-`None` arguments +# present in unselected compare func groups. This requirement is enforced by the test. +# + @mod.export() class CompareFuncSimpleArgs(BaseArgs): @@ -80,6 +86,30 @@ def add_parser_args_impl(self): action="store_true", default=None, ) + self.group.add_argument( + "--save-heatmaps", + help="[EXPERIMENTAL] Directory in which to save heatmaps of the absolute and relative error. ", + default=None, + ) + self.group.add_argument( + "--show-heatmaps", + help="[EXPERIMENTAL] Whether to display heatmaps of the absolute and relative error. Defaults to False. ", + action="store_true", + default=None, + ) + self.group.add_argument( + "--save-error-metrics-plot", + help="[EXPERIMENTAL] Path to directory to save error metrics plot(s). If set, generates plot of absolute and relative error against reference output magnitude." + "This directory is created if it does not already exist." + "This is useful for finding trends in errors, determining whether accuracy failures are just outliers or deeper problems.", + default=None, + ) + self.group.add_argument( + "--show-error-metrics-plot", + help="[EXPERIMENTAL] Whether to display the error metrics plots. Defaults to False. ", + action="store_true", + default=None, + ) def parse_impl(self, args): """ @@ -91,12 +121,20 @@ def parse_impl(self, args): atol (Dict[str, float]): Per-tensor absolute tolerance. check_error_stat (str): The error metric to check. infinities_compare_equal (bool): Whether to allow +-inf to compare as equal. + save_heatmaps (str): Directory in which to save heatmaps of error. + show_heatmaps (bool): Whether to display heatmaps of error. + save_error_metrics_plot (str): Path to store generated error plots. 
+ show_error_metrics_plot (bool): Whether to display the error metrics plots. """ self.no_shape_check = args_util.get(args, "no_shape_check") self.rtol = args_util.parse_arglist_to_dict(args_util.get(args, "rtol")) self.atol = args_util.parse_arglist_to_dict(args_util.get(args, "atol")) self.check_error_stat = args_util.parse_arglist_to_dict(args_util.get(args, "check_error_stat")) self.infinities_compare_equal = args_util.get(args, "infinities_compare_equal") + self.save_heatmaps = args_util.get(args, "save_heatmaps") + self.show_heatmaps = args_util.get(args, "show_heatmaps") + self.save_error_metrics_plot = args_util.get(args, "save_error_metrics_plot") + self.show_error_metrics_plot = args_util.get(args, "show_error_metrics_plot") # Without this early check, failure would only happen after inference, which is clearly not desirable. if self.check_error_stat: @@ -118,6 +156,10 @@ def add_to_script_impl(self, script): fail_fast=self.arg_groups[ComparatorCompareArgs].fail_fast, check_error_stat=self.check_error_stat, infinities_compare_equal=self.infinities_compare_equal, + save_heatmaps=self.save_heatmaps, + show_heatmaps=self.show_heatmaps, + save_error_metrics_plot=self.save_error_metrics_plot, + show_error_metrics_plot=self.show_error_metrics_plot, ) compare_func = None if compare_func_str: diff --git a/tools/Polygraphy/polygraphy/tools/args/comparator/data_loader.py b/tools/Polygraphy/polygraphy/tools/args/comparator/data_loader.py index 8b0fa214..d203e66f 100644 --- a/tools/Polygraphy/polygraphy/tools/args/comparator/data_loader.py +++ b/tools/Polygraphy/polygraphy/tools/args/comparator/data_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/comparator/postprocess.py b/tools/Polygraphy/polygraphy/tools/args/comparator/postprocess.py index fb04bdcd..c12c2a91 100644 --- a/tools/Polygraphy/polygraphy/tools/args/comparator/postprocess.py +++ b/tools/Polygraphy/polygraphy/tools/args/comparator/postprocess.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/logger/logger.py b/tools/Polygraphy/polygraphy/tools/args/logger/logger.py index 9c5b3509..ea8b412f 100644 --- a/tools/Polygraphy/polygraphy/tools/args/logger/logger.py +++ b/tools/Polygraphy/polygraphy/tools/args/logger/logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/args/model.py b/tools/Polygraphy/polygraphy/tools/args/model.py index 434a3f4a..da2d2be8 100644 --- a/tools/Polygraphy/polygraphy/tools/args/model.py +++ b/tools/Polygraphy/polygraphy/tools/args/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -195,12 +195,7 @@ def use_ext(ext_mapping): self.extra_model_info = None self.path, self.extra_model_info = args_util.parse_script_and_func_name(args_util.get(args, "model_file")) - - if self.path is not None: - G_LOGGER.verbose(f"Model: {self.path}") - if not os.path.exists(self.path): - G_LOGGER.warning(f"Model path does not exist: {self.path}") - self.path = os.path.abspath(self.path) + self.path = args_util.parse_path(self.path, "Model") model_type_str = self._required_model_type if self._required_model_type else determine_model_type(self.path) self.model_type = ModelArgs.ModelType(model_type_str) if model_type_str else None diff --git a/tools/Polygraphy/polygraphy/tools/args/util/util.py b/tools/Polygraphy/polygraphy/tools/args/util/util.py index 33087c86..7e8fe90b 100644 --- a/tools/Polygraphy/polygraphy/tools/args/util/util.py +++ b/tools/Polygraphy/polygraphy/tools/args/util/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -106,9 +106,7 @@ def get(args, attr, default=None): attr (str): The name of the command-line argument. default (obj): The default value to return if the argument is not found. Defaults to None. """ - if hasattr(args, attr): - return getattr(args, attr) - return default + return util.try_getattr(args, attr, default) @mod.export() @@ -379,3 +377,27 @@ def parse_num_bytes(num_bytes_arg): "Please use either an integer (e.g. 16000000), scientific notation (e.g. 16e6), " "or a number with a valid suffix: K, M, or G (e.g. 16M)." ) + + +@mod.export() +def parse_path(path, name): + """ + Parses a path from a command-line argument. + + If the path does not exist, emits a message using the specified logging function. + + Args: + path (str): The path. + name (str): Name of what the path refers to. + + Returns: + str: The path, converted to an absolute path if it exists. + """ + if path is None: + return None + + if os.path.exists(path): + path = os.path.abspath(path) + else: + G_LOGGER.warning(f"{name} path does not exist: {path}") + return path diff --git a/tools/Polygraphy/polygraphy/tools/base/tool.py b/tools/Polygraphy/polygraphy/tools/base/tool.py index 61a343ac..70e100c1 100644 --- a/tools/Polygraphy/polygraphy/tools/base/tool.py +++ b/tools/Polygraphy/polygraphy/tools/base/tool.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
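`ModelArgs` now routes its path handling through the new `args_util.parse_path` helper shown above, which warns about (but still returns) nonexistent paths and absolutizes existing ones. A small sketch of that behavior, assuming the helper is importable from `polygraphy.tools.args.util` where it is exported:

```python
from polygraphy.tools.args.util import parse_path

print(parse_path(None, "Model"))          # None is passed through unchanged
print(parse_path("model.onnx", "Model"))  # absolute path if the file exists; otherwise a warning plus the original path
```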
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/convert/convert.py b/tools/Polygraphy/polygraphy/tools/convert/convert.py index 9482c334..4d3d3531 100644 --- a/tools/Polygraphy/polygraphy/tools/convert/convert.py +++ b/tools/Polygraphy/polygraphy/tools/convert/convert.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,10 +27,11 @@ OnnxSaveArgs, TfLoadArgs, TrtConfigArgs, - TrtLoadEngineArgs, + TrtLoadEngineBytesArgs, TrtLoadNetworkArgs, TrtLoadPluginsArgs, - TrtSaveEngineArgs, + TrtSaveEngineBytesArgs, + TrtOnnxFlagArgs, ) from polygraphy.tools.base import Tool @@ -58,8 +59,9 @@ def get_subscriptions_impl(self): TrtConfigArgs(allow_engine_capability=True, allow_tensor_formats=True), TrtLoadPluginsArgs(), TrtLoadNetworkArgs(allow_tensor_formats=True), - TrtLoadEngineArgs(), - TrtSaveEngineArgs(output_opt=False), + TrtLoadEngineBytesArgs(), + TrtSaveEngineBytesArgs(output_opt=False), + TrtOnnxFlagArgs(), ] def add_parser_args_impl(self, parser): @@ -72,16 +74,6 @@ def add_parser_args_impl(self, parser): choices=["onnx", "trt", "onnx-like-trt-network"], ) - onnx_args = self.arg_groups[OnnxLoadArgs].group - onnx_args.add_argument( - "--fp-to-fp16", - help="Convert all floating point tensors in an ONNX model to 16-bit precision. " - "This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends. " - "Requires onnxmltools. ", - action="store_true", - default=None, - ) - def run_impl(self, args): if not args.convert_to: _, ext = os.path.splitext(args.output) @@ -101,11 +93,9 @@ def run_impl(self, args): onnx_backend.save_onnx(onnx_like, args.output) elif convert_type.is_onnx(): model = self.arg_groups[OnnxLoadArgs].load_onnx() - if args.fp_to_fp16: - model = onnx_backend.convert_to_fp16(model) self.arg_groups[OnnxSaveArgs].save_onnx(model, args.output) elif convert_type.is_trt(): - with self.arg_groups[TrtLoadEngineArgs].load_engine() as engine: - self.arg_groups[TrtSaveEngineArgs].save_engine(engine, args.output) + with self.arg_groups[TrtLoadEngineBytesArgs].load_engine_bytes() as serialized_engine: + self.arg_groups[TrtSaveEngineBytesArgs].save_engine_bytes(serialized_engine, args.output) else: G_LOGGER.critical(f"Cannot convert to model type: {convert_type}") diff --git a/tools/Polygraphy/polygraphy/tools/data/data.py b/tools/Polygraphy/polygraphy/tools/data/data.py index e94125af..52374de2 100644 --- a/tools/Polygraphy/polygraphy/tools/data/data.py +++ b/tools/Polygraphy/polygraphy/tools/data/data.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
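With `TrtLoadEngineBytesArgs` and `TrtSaveEngineBytesArgs`, `polygraphy convert` now keeps the engine in serialized form from build to save, which matters because version-compatible engines cannot be re-serialized once deserialized. A rough API equivalent of `polygraphy convert model.onnx --convert-to trt -o engine.plan`, with both paths as placeholders:

```python
from polygraphy.backend.common import SaveBytes
from polygraphy.backend.trt import CreateConfig, EngineBytesFromNetwork, NetworkFromOnnxPath

# Build the engine and write the serialized plan to disk without ever deserializing it.
build_serialized = EngineBytesFromNetwork(NetworkFromOnnxPath("model.onnx"), config=CreateConfig())
SaveBytes(build_serialized, path="engine.plan")()
```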
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/data/subtool/to_input.py b/tools/Polygraphy/polygraphy/tools/data/subtool/to_input.py index bff043ff..f19897c1 100644 --- a/tools/Polygraphy/polygraphy/tools/data/subtool/to_input.py +++ b/tools/Polygraphy/polygraphy/tools/data/subtool/to_input.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/debug/debug.py b/tools/Polygraphy/polygraphy/tools/debug/debug.py index 8f22be1b..dcc6f0b8 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/debug.py +++ b/tools/Polygraphy/polygraphy/tools/debug/debug.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/base.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/base.py index 68419bbc..60ea46b3 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/base.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/base.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,10 +24,11 @@ OnnxInferShapesArgs, OnnxLoadArgs, TrtConfigArgs, - TrtLoadEngineArgs, + TrtLoadEngineBytesArgs, TrtLoadNetworkArgs, TrtLoadPluginsArgs, - TrtSaveEngineArgs, + TrtOnnxFlagArgs, + TrtSaveEngineBytesArgs, ) from polygraphy.tools.base import Tool from polygraphy.tools.debug.subtool.iterative_debug_args import ArtifactSortArgs, CheckCmdArgs, IterativeDebugArgs @@ -66,9 +67,10 @@ def get_subscriptions_impl(self): DataLoaderArgs(), # For int8 calibration TrtConfigArgs(precision_constraints_default=self._precision_constraints_default), TrtLoadPluginsArgs(), + TrtOnnxFlagArgs(), TrtLoadNetworkArgs(), - TrtLoadEngineArgs(), - TrtSaveEngineArgs(output_opt=False), + TrtLoadEngineBytesArgs(), + TrtSaveEngineBytesArgs(output_opt=False), ] def show_start_end_logging_impl(self, args): @@ -134,7 +136,7 @@ def make_iter_art(_): self.process_network(network) try: - engine = self.arg_groups[TrtLoadEngineArgs].load_engine((builder, network)) + serialized_engine = self.arg_groups[TrtLoadEngineBytesArgs].load_engine_bytes((builder, network)) except Exception as err: G_LOGGER.warning( f"Failed to create network or engine, continuing to the next iteration.\nNote: Error was: {err}" @@ -143,9 +145,9 @@ def make_iter_art(_): self.arg_groups[IterativeDebugArgs].skip_iteration(success=False) else: # Don't need to keep the engine around in memory - just serialize to disk and free it. 
- with engine: - self.arg_groups[TrtSaveEngineArgs].save_engine( - engine, self.arg_groups[IterativeDebugArgs].iter_artifact_path + with serialized_engine: + self.arg_groups[TrtSaveEngineBytesArgs].save_engine_bytes( + serialized_engine, self.arg_groups[IterativeDebugArgs].iter_artifact_path ) def advance(context): diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/build.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/build.py index 5ffd9af7..3854b35a 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/build.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/build.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/iterative_debug_args.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/iterative_debug_args.py index b4a704df..664c4837 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/iterative_debug_args.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/iterative_debug_args.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/precision.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/precision.py index ee664072..78b7aa6f 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/precision.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/precision.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -211,7 +211,11 @@ def should_exclude(): has_non_execution_output = any( not layer.get_output(i).is_execution_tensor for i in range(layer.num_outputs) ) - return layer.type in EXCLUDE_LAYERS or has_non_execution_output + has_non_activation_output = any( + layer.get_output(i).dtype not in [trt.float32, trt.float16, trt.int8] + for i in range(layer.num_outputs) + ) + return layer.type in EXCLUDE_LAYERS or has_non_execution_output or has_non_activation_output if not should_exclude(): G_LOGGER.extra_verbose(f"Running layer in higher precision: {trt_util.str_from_layer(layer, index)}") diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/reduce.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/reduce.py index b4339235..649db1a5 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/reduce.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/reduce.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -367,6 +367,40 @@ def mark_io(graph, attr, tensors, filter_const=True): setattr(graph, attr, tensors) G_LOGGER.info(f"Marking model {attr}: {getattr(graph, attr)}") + + if attr == "inputs": + # When marking model inputs, there may be cases where the producer of the + # desired input also produces graph outputs like so: + # + # Node0 + # / \ + # out0 out1 (graph output) + # (desired + # graph input) + # | + # Node1 + # + # In this example, if we don't remove `out1` from the graph outputs, + # we'll be left with the following graph after cleanup: + # + # Node0 + # | + # out0 out1 (graph output) + # (graph input) + # | + # Node1 + # + # This will be malformed if `Node0` requires 2 outputs in the ONNX spec. + # + for tensor in tensors: + if not tensor.inputs: + continue + + producer = tensor.inputs[0] + for out in producer.outputs: + if out in graph.outputs: + graph.outputs.remove(out) + return graph def names_from_tensors(tensors): @@ -382,6 +416,9 @@ def lookup_tensors(graph, names): # debug_replay is used to provide the debug_replay from previous iterations to subsequent iterations. # Without this, the debug_replay would only contain entries for the final call to `bisect_io`. def bisect_io(graph, marker, attr, filter_const=True, debug_replay=None): + if attr not in ["inputs", "outputs"]: + G_LOGGER.internal_error(f"Invalid attribute specified: {attr}") + G_LOGGER.start(f"Reducing model {attr}") def make_iter_art(context): diff --git a/tools/Polygraphy/polygraphy/tools/debug/subtool/repeat.py b/tools/Polygraphy/polygraphy/tools/debug/subtool/repeat.py index 80de7d2c..9336a823 100644 --- a/tools/Polygraphy/polygraphy/tools/debug/subtool/repeat.py +++ b/tools/Polygraphy/polygraphy/tools/debug/subtool/repeat.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/inspect/inspect.py b/tools/Polygraphy/polygraphy/tools/inspect/inspect.py index a8469ce3..79a66a34 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/inspect.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/inspect.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/inspect/subtool/capability.py b/tools/Polygraphy/polygraphy/tools/inspect/subtool/capability.py index 381fbd16..cfe10f60 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/subtool/capability.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/subtool/capability.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
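The situation described in the comment inside `mark_io` above is easy to reproduce with ONNX-GraphSurgeon. A small sketch (op and tensor names are made up; "TwoOutputOp" stands in for any node the ONNX spec requires to have two outputs) showing why the producer's other graph outputs must be dropped before cleanup:

```python
import numpy as np
import onnx_graphsurgeon as gs

inp = gs.Variable("inp", dtype=np.float32, shape=(1, 3))
out0 = gs.Variable("out0", dtype=np.float32, shape=(1, 3))
out1 = gs.Variable("out1", dtype=np.float32, shape=(1, 3))
final = gs.Variable("final", dtype=np.float32, shape=(1, 3))

node0 = gs.Node(op="TwoOutputOp", inputs=[inp], outputs=[out0, out1])
node1 = gs.Node(op="Identity", inputs=[out0], outputs=[final])
graph = gs.Graph(nodes=[node0, node1], inputs=[inp], outputs=[out1, final])

# Promote `out0` to a graph input, mirroring `mark_io(graph, "inputs", [out0])`:
graph.inputs = [out0]
producer = out0.inputs[0]  # node0 still produces out0
for out in producer.outputs:
    if out in graph.outputs:
        graph.outputs.remove(out)  # drop `out1` so cleanup() can remove node0 entirely
graph.cleanup()

print(graph.nodes)  # only the Identity node remains
```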
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/inspect/subtool/data.py b/tools/Polygraphy/polygraphy/tools/inspect/subtool/data.py index 566bc048..651ed83c 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/subtool/data.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/subtool/data.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/inspect/subtool/diff_tactics.py b/tools/Polygraphy/polygraphy/tools/inspect/subtool/diff_tactics.py index 08a3c631..e6e18c63 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/subtool/diff_tactics.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/subtool/diff_tactics.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/inspect/subtool/model.py b/tools/Polygraphy/polygraphy/tools/inspect/subtool/model.py index aba39557..d4489d9a 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/subtool/model.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/subtool/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,17 +20,20 @@ from polygraphy.logger import G_LOGGER from polygraphy.tools.args import ( ModelArgs, - OnnxLoadArgs, OnnxInferShapesArgs, + OnnxLoadArgs, TfLoadArgs, TrtLoadEngineArgs, + TrtLoadEngineBytesArgs, TrtLoadNetworkArgs, TrtLoadPluginsArgs, + TrtOnnxFlagArgs, ) from polygraphy.tools.base import Tool trt_util = mod.lazy_import("polygraphy.backend.trt.util") onnx_util = mod.lazy_import("polygraphy.backend.onnx.util") +onnx_backend = mod.lazy_import("polygraphy.backend.onnx") tf_util = mod.lazy_import("polygraphy.backend.tf.util") @@ -50,7 +53,9 @@ def get_subscriptions_impl(self): OnnxLoadArgs(outputs_opt_prefix=False), TrtLoadPluginsArgs(), TrtLoadNetworkArgs(allow_custom_outputs=False), + TrtLoadEngineBytesArgs(), TrtLoadEngineArgs(), + TrtOnnxFlagArgs(), ] def add_parser_args_impl(self, parser): @@ -74,6 +79,18 @@ def add_parser_args_impl(self, parser): default=[], ) + parser.add_argument( + "--list-unbounded-dds", + help=""" + List all tensors with unbounded Data-Dependent Shapes (DDS). + + Note that listing unbounded DDS only works for models that have been constant folded and have shapes inferred. 
+ """, + action="store_true", + default=None, + dest="show_unbounded_dds" + ) + def run_impl(self, args): def show(aspect): return aspect in args.show @@ -81,7 +98,10 @@ def show(aspect): def inspect_trt(): if self.arg_groups[ModelArgs].model_type == "engine": with self.arg_groups[TrtLoadEngineArgs].load_engine() as engine: - engine_str = trt_util.str_from_engine(engine, show_layers=show("layers"), show_attrs=show("attrs")) + context = engine.create_execution_context() + engine_str = trt_util.str_from_engine( + engine, context, show_layers=show("layers"), show_attrs=show("attrs") + ) G_LOGGER.info(f"==== TensorRT Engine ====\n{engine_str}") else: builder, network, parser = util.unpack_args(self.arg_groups[TrtLoadNetworkArgs].load_network(), 3) @@ -101,6 +121,11 @@ def inspect_onnx(): onnx_model, show_layers=show("layers"), show_attrs=show("attrs"), show_weights=show("weights") ).strip() G_LOGGER.info(f"==== ONNX Model ====\n{model_str}") + if args.show_unbounded_dds: + graph = onnx_backend.gs_from_onnx(onnx_model) + unbounded_dds_tensors = onnx_util.get_unbounded_dds_tensors(graph) + G_LOGGER.info(f"Found tensors with unbounded DDS: {unbounded_dds_tensors}") + def inspect_tf(): tf_graph, _ = self.arg_groups[TfLoadArgs].load_graph() diff --git a/tools/Polygraphy/polygraphy/tools/inspect/subtool/tactics.py b/tools/Polygraphy/polygraphy/tools/inspect/subtool/tactics.py index ed9e2445..d33b0978 100644 --- a/tools/Polygraphy/polygraphy/tools/inspect/subtool/tactics.py +++ b/tools/Polygraphy/polygraphy/tools/inspect/subtool/tactics.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/registry.py b/tools/Polygraphy/polygraphy/tools/registry.py index 341cd87c..9469fcd6 100644 --- a/tools/Polygraphy/polygraphy/tools/registry.py +++ b/tools/Polygraphy/polygraphy/tools/registry.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,9 +44,7 @@ def try_register_tool(module, tool_class): ToolClass = getattr(toolmod, tool_class) TOOL_REGISTRY.append(ToolClass()) except Exception as err: - G_LOGGER.internal_error( - f"Could not load command-line tool: {tool_class.lower()}.\nNote: Error was: {err}" - ) + G_LOGGER.internal_error(f"Could not load command-line tool: {tool_class.lower()}.\nNote: Error was: {err}") TOOL_REGISTRY.append(MissingTool(tool_class.lower(), err=err)) diff --git a/tools/Polygraphy/polygraphy/tools/run/run.py b/tools/Polygraphy/polygraphy/tools/run/run.py index 131f482b..2663a733 100644 --- a/tools/Polygraphy/polygraphy/tools/run/run.py +++ b/tools/Polygraphy/polygraphy/tools/run/run.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,10 +45,12 @@ TrtConfigArgs, TrtLegacyRunnerArgs, TrtLoadEngineArgs, + TrtLoadEngineBytesArgs, TrtLoadNetworkArgs, TrtLoadPluginsArgs, + TrtOnnxFlagArgs, TrtRunnerArgs, - TrtSaveEngineArgs, + TrtSaveEngineBytesArgs, ) from polygraphy.tools.base import Tool from polygraphy.tools.script import Script, safe @@ -127,9 +129,11 @@ def get_subscriptions_impl(self): # We run calibration/inference with the same data, so it doesn't really matter if it's random. TrtConfigArgs(allow_random_data_calib_warning=False), TrtLoadPluginsArgs(), + TrtOnnxFlagArgs(), TrtLoadNetworkArgs(), - TrtSaveEngineArgs(output_opt="save-engine", output_short_opt=False), - TrtLoadEngineArgs(allow_saving=True), + TrtSaveEngineBytesArgs(output_opt="save-engine", output_short_opt=False), + TrtLoadEngineBytesArgs(allow_saving=True), + TrtLoadEngineArgs(), TrtRunnerArgs(), TrtLegacyRunnerArgs(), DataLoaderArgs(), diff --git a/tools/Polygraphy/polygraphy/tools/script.py b/tools/Polygraphy/polygraphy/tools/script.py index 5ba91943..80ea1004 100644 --- a/tools/Polygraphy/polygraphy/tools/script.py +++ b/tools/Polygraphy/polygraphy/tools/script.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -227,6 +227,17 @@ def __iadd__(self, other): self.s += other.s return self + def __eq__(self, other): + return ( + isinstance(other, Script.String) + and self.safe == other.safe + and self.inline == other.inline + and self.s == other.s + ) + + def __hash__(self): + return hash((self.s, self.safe, self.inline)) + def unwrap(self): """ Returns the underlying string object. diff --git a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/base.py b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/base.py index dda098fb..c49c126a 100644 --- a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/base.py +++ b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/base.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/extract.py b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/extract.py index e6e9d520..69923a3d 100644 --- a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/extract.py +++ b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/extract.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
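The new `__eq__`/`__hash__` on `Script.String` make generated script fragments comparable by value and usable as set members or dictionary keys. A tiny sketch using the `safe()` helper:

```python
from polygraphy.tools.script import safe

# Two fragments with the same contents and flags now compare equal and hash identically.
assert safe("trt.float16") == safe("trt.float16")
assert len({safe("trt.float16"), safe("trt.float16")}) == 1
```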
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/insert.py b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/insert.py index d5f7f33d..5459dc6e 100644 --- a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/insert.py +++ b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/insert.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/sanitize.py b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/sanitize.py index 384e153a..461bf564 100644 --- a/tools/Polygraphy/polygraphy/tools/surgeon/subtool/sanitize.py +++ b/tools/Polygraphy/polygraphy/tools/surgeon/subtool/sanitize.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -170,7 +170,7 @@ def get_subscriptions_impl(self): ), DataLoaderArgs(), OnnxInferShapesArgs(default=True, allow_force_fallback=True), - OnnxLoadArgs(outputs_opt_prefix=""), + OnnxLoadArgs(outputs_opt_prefix="", allow_setting_upper_bounds=True), OnnxSaveArgs(allow_shape_inference=True, output_opt_required=True), ConstFoldArgs(), ] diff --git a/tools/Polygraphy/polygraphy/tools/surgeon/surgeon.py b/tools/Polygraphy/polygraphy/tools/surgeon/surgeon.py index 5bf79ab5..3a0eb911 100644 --- a/tools/Polygraphy/polygraphy/tools/surgeon/surgeon.py +++ b/tools/Polygraphy/polygraphy/tools/surgeon/surgeon.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/template/subtool/base.py b/tools/Polygraphy/polygraphy/tools/template/subtool/base.py index 47d01ca0..cf6cd802 100644 --- a/tools/Polygraphy/polygraphy/tools/template/subtool/base.py +++ b/tools/Polygraphy/polygraphy/tools/template/subtool/base.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/template/subtool/onnx_gs.py b/tools/Polygraphy/polygraphy/tools/template/subtool/onnx_gs.py index afd33ff0..023b0f80 100644 --- a/tools/Polygraphy/polygraphy/tools/template/subtool/onnx_gs.py +++ b/tools/Polygraphy/polygraphy/tools/template/subtool/onnx_gs.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/template/subtool/trt_config.py b/tools/Polygraphy/polygraphy/tools/template/subtool/trt_config.py index 3fa1f505..f2a16da7 100644 --- a/tools/Polygraphy/polygraphy/tools/template/subtool/trt_config.py +++ b/tools/Polygraphy/polygraphy/tools/template/subtool/trt_config.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/template/subtool/trt_network.py b/tools/Polygraphy/polygraphy/tools/template/subtool/trt_network.py index cb29c67f..8796fb39 100644 --- a/tools/Polygraphy/polygraphy/tools/template/subtool/trt_network.py +++ b/tools/Polygraphy/polygraphy/tools/template/subtool/trt_network.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ TfLoadArgs, TrtLoadNetworkArgs, TrtLoadPluginsArgs, + TrtOnnxFlagArgs, ) from polygraphy.tools.script import Script, inline, safe from polygraphy.tools.template.subtool.base import BaseTemplateTool @@ -44,6 +45,7 @@ def get_subscriptions_impl(self): OnnxLoadArgs(allow_shape_inference=False, allow_from_tf=True), TrtLoadPluginsArgs(), TrtLoadNetworkArgs(), + TrtOnnxFlagArgs(), ] def run_impl(self, args): diff --git a/tools/Polygraphy/polygraphy/tools/template/template.py b/tools/Polygraphy/polygraphy/tools/template/template.py index d1267762..4fab5973 100644 --- a/tools/Polygraphy/polygraphy/tools/template/template.py +++ b/tools/Polygraphy/polygraphy/tools/template/template.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/tools/util.py b/tools/Polygraphy/polygraphy/tools/util.py index a1b2f33c..9ead6c29 100644 --- a/tools/Polygraphy/polygraphy/tools/util.py +++ b/tools/Polygraphy/polygraphy/tools/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/util/format.py b/tools/Polygraphy/polygraphy/util/format.py index 2360d114..a3b14d97 100644 --- a/tools/Polygraphy/polygraphy/util/format.py +++ b/tools/Polygraphy/polygraphy/util/format.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/polygraphy/util/util.py b/tools/Polygraphy/polygraphy/util/util.py index fb8d1279..78d3a02c 100644 --- a/tools/Polygraphy/polygraphy/util/util.py +++ b/tools/Polygraphy/polygraphy/util/util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,9 @@ # import contextlib import copy +import functools import glob +import inspect import math import os import sys @@ -118,14 +120,14 @@ def check_sequence_contains( if check_missing and missing: log_func( f"The following {items_name} were not found in {name}: {missing}.\n" - f"Note: All {items_name} are: {items}, but {items_name} provided were: {sequence}" + f"Note: {items_name} requested were: {items}, but all {items_name} are: {sequence}" ) extra = sequence - items if check_extra and extra: log_func( f"Extra {items_name} in {name}: {extra}.\n" - f"Note: All {items_name} are: {items}, but {items_name} provided were: {sequence}" + f"Note: {items_name} requested were: {items}, but all {items_name} are: {sequence}" ) return missing, extra @@ -200,7 +202,7 @@ def unique_list(sequence): # >>> y.value # ['SHOULD NOT BE IN Y'] # -# If we rewrite the class using default value: +# If we rewrite the class using `default()`: # # class MyClass: # def __init__(self, value=None): @@ -663,6 +665,39 @@ def invoke_if_callable(func, *args, **kwargs): return func, False +@mod.export() +def check_called_by(expected_caller_name): + """ + Decorator that checks whether a callable was called by a + particular function and emits a warning if not. + + Args: + func (Callable): The callable to check. + expected_caller_name (str): The expected name of the caller. + """ + + def check_called_by_impl(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + + # Skip checks if we're calling these functions internally + module = inspect.getmodule(sys._getframe(1)) + called_from_polygraphy = module.__name__ and module.__name__.split(".")[0] == "polygraphy" + + if not called_from_polygraphy: + actual_caller_name = sys._getframe(1).f_code.co_name + if actual_caller_name != expected_caller_name: + G_LOGGER.warning( + f"Calling '{func.__qualname__}()' directly is not recommended. Please use '{expected_caller_name}()' instead.", + ) + + return func(*args, **kwargs) + + return wrapped + + return check_called_by_impl + + ## ## Shapes ## @@ -1049,3 +1084,18 @@ def getattr_nested(obj, attr): for typ in attr.split("."): obj = getattr(obj, typ) return obj + + +@mod.export() +def try_getattr(obj, attr, default=None): + """ + Gets an attribute if it exists, otherwise returns a default value. + + Args: + obj: The object from which to try to get the attribute. + attr (str): The name of the attribute. + default (obj): The default value to return if the argument is not found. Defaults to None. 
+ """ + if hasattr(obj, attr): + return getattr(obj, attr) + return default diff --git a/tools/Polygraphy/polygraphy_debug_replay.json b/tools/Polygraphy/polygraphy_debug_replay.json new file mode 100644 index 00000000..5dd85163 --- /dev/null +++ b/tools/Polygraphy/polygraphy_debug_replay.json @@ -0,0 +1,14 @@ +{ + "_N0_outputs": [ + true, + [ + 3 + ] + ], + "_N1_outputs": [ + true, + [ + 4 + ] + ] +} \ No newline at end of file diff --git a/tools/Polygraphy/polygraphy_debug_replay_skip_current.json b/tools/Polygraphy/polygraphy_debug_replay_skip_current.json new file mode 100644 index 00000000..61694a8b --- /dev/null +++ b/tools/Polygraphy/polygraphy_debug_replay_skip_current.json @@ -0,0 +1,20 @@ +{ + "_N0_outputs": [ + true, + [ + 3 + ] + ], + "_N1_outputs": [ + true, + [ + 4 + ] + ], + "_N0_inputs": [ + false, + [ + 3 + ] + ] +} \ No newline at end of file diff --git a/tools/Polygraphy/reduced.onnx b/tools/Polygraphy/reduced.onnx new file mode 100644 index 0000000000000000000000000000000000000000..57da717077c18559a4bc93912dfbc2876a539616 GIT binary patch literal 151 zcmdBH9zt zi_gnXNsTvF;`PffQAkuMN=?jB$S)8Q$5aT`6(s;Rmy3&ogHecui;06VNrnr}79k{A KCl)RS0d4>;Rw$VO literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/setup.py b/tools/Polygraphy/setup.py index 543fccd4..b0e14497 100644 --- a/tools/Polygraphy/setup.py +++ b/tools/Polygraphy/setup.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/base/test_loader.py b/tools/Polygraphy/tests/backend/base/test_loader.py index 0daf7c6e..f836bcae 100644 --- a/tools/Polygraphy/tests/backend/base/test_loader.py +++ b/tools/Polygraphy/tests/backend/base/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/base/test_runner.py b/tools/Polygraphy/tests/backend/base/test_runner.py index 7099e44d..f61b0710 100644 --- a/tools/Polygraphy/tests/backend/base/test_runner.py +++ b/tools/Polygraphy/tests/backend/base/test_runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/common/test_loader.py b/tools/Polygraphy/tests/backend/common/test_loader.py index 02b26658..0aaba8c9 100644 --- a/tools/Polygraphy/tests/backend/common/test_loader.py +++ b/tools/Polygraphy/tests/backend/common/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/onnx/test_loader.py b/tools/Polygraphy/tests/backend/onnx/test_loader.py index e3bb6725..feb07d8e 100644 --- a/tools/Polygraphy/tests/backend/onnx/test_loader.py +++ b/tools/Polygraphy/tests/backend/onnx/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,6 +29,7 @@ OnnxFromPath, OnnxFromTfGraph, SaveOnnx, + SetUpperBound, extract_subgraph, gs_from_onnx, infer_shapes, @@ -56,6 +57,10 @@ def test_basic(self): assert isinstance(model, onnx.ModelProto) assert len(model.graph.node) == 1 + @pytest.mark.serial + def test_warn_if_impl_methods_called(self, check_warnings_on_loader_impl_methods): + check_warnings_on_loader_impl_methods(OnnxFromPath(ONNX_MODELS["identity"].path)) + def test_external_data(self): model = ONNX_MODELS["ext_weights"] loader = OnnxFromPath(model.path, model.ext_data) @@ -217,6 +222,49 @@ def test_size_threshold(self, size_threshold, expect_folding): assert model.graph.node[0].op_type == "Tile" +class TestSetUpperBound: + + @pytest.mark.parametrize("global_upper_bound", [False, True]) + @pytest.mark.parametrize("specified_upper_bound", [False, True]) + def test_set_upper_bound( + self, + global_upper_bound, + specified_upper_bound, + ): + original_model = onnx_from_path(ONNX_MODELS["unbounded_dds"].path) + upper_bound_dict = {} + if not global_upper_bound and not specified_upper_bound: + upper_bound_dict[""] = 1000 + upper_bound = 1000 + if global_upper_bound: + upper_bound_dict[""] = 2000 + upper_bound = 2000 + if specified_upper_bound: + upper_bound_dict["cast_out_6"] = 4000 + upper_bound = 4000 + + loader = SetUpperBound( + original_model, + upper_bounds=upper_bound_dict, + ) + + model = loader() + graph = gs_from_onnx(model) + + # Check if there is a Min operator in the modified model + find_min = False + for node in graph.nodes: + if node.op == 'Min': + find_min = True + # Check if the Min operator's second input is a constant tensor + assert isinstance(node.inputs[1], gs.Constant) + + val = node.inputs[1].values + # Check if the constant value equals the target upper bound + assert val == upper_bound + assert (find_min) + + class TestSaveOnnx: def test_save_onnx(self): with tempfile.TemporaryDirectory() as outdir: diff --git a/tools/Polygraphy/tests/backend/onnx/test_util.py b/tools/Polygraphy/tests/backend/onnx/test_util.py index 69be5639..2f731dff 100644 --- a/tools/Polygraphy/tests/backend/onnx/test_util.py +++ b/tools/Polygraphy/tests/backend/onnx/test_util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,10 @@ # limitations under the License. 
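For context, the SetUpperBound loader exercised by this test can be applied to a model roughly as follows (a sketch mirroring the test; "model.onnx" and the tensor name "cast_out_6" are illustrative):

from polygraphy.backend.onnx import SetUpperBound, onnx_from_path

model = onnx_from_path("model.onnx")
# "" sets a global default upper bound for data-dependent shapes;
# named entries override it for specific tensors.
set_bounds = SetUpperBound(model, upper_bounds={"": 1000, "cast_out_6": 4000})
bounded_model = set_bounds()  # ModelProto with Min nodes inserted to clamp DDS tensors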
# -from polygraphy.backend.onnx import onnx_from_path +from polygraphy.backend.onnx import ( + onnx_from_path, + gs_from_onnx +) from polygraphy.backend.onnx import util as onnx_util from tests.models.meta import ONNX_MODELS @@ -23,3 +26,10 @@ def test_get_num_nodes(): model = onnx_from_path(ONNX_MODELS["scan"].path) assert onnx_util.get_num_nodes(model) == 3 # Should count subgraph nodes. + +def test_get_unbounded_dds_tensors(): + model = onnx_from_path(ONNX_MODELS["unbounded_dds"].path) + graph = gs_from_onnx(model) + tensors = onnx_util.get_unbounded_dds_tensors(graph) + assert len(tensors) == 1 + assert tensors[0].name == 'cast_out_6' \ No newline at end of file diff --git a/tools/Polygraphy/tests/backend/onnxrt/test_loader.py b/tools/Polygraphy/tests/backend/onnxrt/test_loader.py index 2fcd6cf8..4efab6f3 100644 --- a/tools/Polygraphy/tests/backend/onnxrt/test_loader.py +++ b/tools/Polygraphy/tests/backend/onnxrt/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/onnxrt/test_runner.py b/tools/Polygraphy/tests/backend/onnxrt/test_runner.py index 4e031bf9..3c17437d 100644 --- a/tools/Polygraphy/tests/backend/onnxrt/test_runner.py +++ b/tools/Polygraphy/tests/backend/onnxrt/test_runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,6 +42,12 @@ def test_basic(self): assert runner.last_inference_time() is not None assert not runner.is_active + @pytest.mark.serial + def test_warn_if_impl_methods_called(self, check_warnings_on_runner_impl_methods): + model = ONNX_MODELS["identity"] + runner = OnnxrtRunner(SessionFromOnnx(model.loader)) + check_warnings_on_runner_impl_methods(runner) + def test_shape_output(self): model = ONNX_MODELS["reshape"] with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner: diff --git a/tools/Polygraphy/tests/backend/pluginref/test_runner.py b/tools/Polygraphy/tests/backend/pluginref/test_runner.py index 57b934ba..b0a18f0a 100644 --- a/tools/Polygraphy/tests/backend/pluginref/test_runner.py +++ b/tools/Polygraphy/tests/backend/pluginref/test_runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -43,6 +43,12 @@ def test_basic(self): model.check_runner(runner) assert not runner.is_active + @pytest.mark.serial + def test_warn_if_impl_methods_called(self, check_warnings_on_runner_impl_methods): + model = ONNX_MODELS["identity"] + runner = PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) + check_warnings_on_runner_impl_methods(runner) + def test_works_on_multiple_nodes(self): model = ONNX_MODELS["identity_identity"] with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner: diff --git a/tools/Polygraphy/tests/backend/test_tensorrt_legacy.py b/tools/Polygraphy/tests/backend/test_tensorrt_legacy.py index 2b993d55..0218dcbb 100644 --- a/tools/Polygraphy/tests/backend/test_tensorrt_legacy.py +++ b/tools/Polygraphy/tests/backend/test_tensorrt_legacy.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/tf/test_loader.py b/tools/Polygraphy/tests/backend/tf/test_loader.py index bda5df2f..f05f402b 100644 --- a/tools/Polygraphy/tests/backend/tf/test_loader.py +++ b/tools/Polygraphy/tests/backend/tf/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/tf/test_runner.py b/tools/Polygraphy/tests/backend/tf/test_runner.py index ddf3771f..22ceb98f 100644 --- a/tools/Polygraphy/tests/backend/tf/test_runner.py +++ b/tools/Polygraphy/tests/backend/tf/test_runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,12 @@ def test_basic(self): assert runner.last_inference_time() is not None assert not runner.is_active + @pytest.mark.serial + def test_warn_if_impl_methods_called(self, check_warnings_on_runner_impl_methods): + model = TF_MODELS["identity"] + runner = TfRunner(SessionFromGraph(model.loader)) + check_warnings_on_runner_impl_methods(runner) + @pytest.mark.skip(reason="Non-trivial to set up - requires CUPTI") def test_save_timeline(self): model = TF_MODELS["identity"] diff --git a/tools/Polygraphy/tests/backend/trt/test_algorithm_selector.py b/tools/Polygraphy/tests/backend/trt/test_algorithm_selector.py index 69bf9f58..48572eed 100644 --- a/tools/Polygraphy/tests/backend/trt/test_algorithm_selector.py +++ b/tools/Polygraphy/tests/backend/trt/test_algorithm_selector.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,120 +20,224 @@ import pytest import tensorrt as trt from polygraphy import mod, util -from polygraphy.backend.trt import Algorithm, TacticRecorder, TacticReplayData, TacticReplayer +from polygraphy.backend.trt import Algorithm, TacticRecorder, TacticReplayData, TacticReplayer, TensorInfo from polygraphy.exception import PolygraphyException -ALGO_EQ_CASES = [ - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - True, - ), # Same - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 7, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - False, - ), # Different implementation - ( - Algorithm( - 6, 2, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - False, - ), # Different tactic - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.CHW32, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - False, - ), # Different input format - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm(6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.int8)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]), - False, - ), # Different input data type - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.CHW32, trt.float32)] - ), - False, - ), # Different output format - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm(6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.int8)]), - False, - ), # Different output data type - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)] * 2, outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - False, - ), # Different number of inputs - ( - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] * 2 - ), - Algorithm( - 6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] - ), - False, - ), # Different number of outputs -] - - -@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older") -class TestAlgorithm: - @pytest.mark.parametrize("left, right, expected", ALGO_EQ_CASES) - def test_equality(self, left, right, expected): - assert (left == right) == expected - FakeAlgorithmContext = namedtuple("FakeAlgorithmContext", ["name", "num_inputs", "num_outputs"]) FakeAlgorithm = namedtuple("FakeAlgorithm", ["algorithm_variant", "io_info"]) 
FakeAlgorithm.get_algorithm_io_info = lambda this, index: this.io_info[index] FakeAlgorithmVariant = namedtuple("FakeAlgorithmVariant", ["implementation", "tactic"]) -FakeAlgorithmIOInfo = namedtuple("FakeAlgorithmIOInfo", ["tensor_format", "dtype", "strides"]) def fake_context(name): return FakeAlgorithmContext(name=name, num_inputs=1, num_outputs=1) +def make_tensor_info( + tensor_format=trt.TensorFormat.LINEAR, + dtype=trt.float32, + strides=(1, 2, 3), + vectorized_dim=-1, + components_per_element=1, +): + return TensorInfo(tensor_format, dtype, strides, vectorized_dim, components_per_element) + + def fake_algo(implementation=6, tactic=0, io=None): - io_info = [FakeAlgorithmIOInfo(tensor_format=trt.TensorFormat.LINEAR, dtype=trt.float32, strides=(4, 5, 6))] * 2 + io_info = [make_tensor_info()] * 2 if io: io_info = [] for fmt, dtype, strides in io: - io_info.append(FakeAlgorithmIOInfo(tensor_format=fmt, dtype=dtype, strides=strides)) + io_info.append( + TensorInfo(tensor_format=fmt, dtype=dtype, strides=strides, vectorized_dim=-1, components_per_element=1) + ) trt_algo = FakeAlgorithm(algorithm_variant=FakeAlgorithmVariant(implementation, tactic), io_info=io_info) return trt_algo +@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older") +class TestTensorInfo: + @pytest.mark.parametrize( + "left, right, expected", + [ + ( + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + True, + ), + # Different format + ( + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + TensorInfo(trt.TensorFormat.HWC, trt.float32, (1, 2, 3), -1, 1), + False, + ), + # Different data type + ( + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + TensorInfo(trt.TensorFormat.LINEAR, trt.float16, (1, 2, 3), -1, 1), + False, + ), + # Different vectotrization + ( + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), 0, 2), + False, + ), + ], + ) + def test_equality(self, left, right, expected): + assert (left == right) == expected + + +@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older") +class TestAlgorithm: + @pytest.mark.parametrize( + "left, right, expected", + [ + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + True, + ), # Same + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 7, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different implementation + ( + Algorithm( + 6, + 2, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different tactic + ( + Algorithm( + 6, + 1, + 
inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.CHW32, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different input format + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.int8)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different input data type + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.CHW32, trt.float32)], + ), + False, + ), # Different output format + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.int8)], + ), + False, + ), # Different output data type + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)] * 2, + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different number of inputs + ( + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)] * 2, + ), + Algorithm( + 6, + 1, + inputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + outputs=[make_tensor_info(trt.TensorFormat.LINEAR, trt.float32)], + ), + False, + ), # Different number of outputs + ], + ) + def test_equality(self, left, right, expected): + assert (left == right) == expected + + @pytest.fixture(params=[True, False], ids=["path", "object"]) def replay(request): """ diff --git a/tools/Polygraphy/tests/backend/trt/test_calibrator.py b/tools/Polygraphy/tests/backend/trt/test_calibrator.py index e5bf81c8..2ae99a47 100644 --- a/tools/Polygraphy/tests/backend/trt/test_calibrator.py +++ b/tools/Polygraphy/tests/backend/trt/test_calibrator.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
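In short, Algorithm I/O is now described with TensorInfo objects rather than (format, dtype) tuples; a minimal construction sketch (illustrative values only):

import tensorrt as trt
from polygraphy.backend.trt import Algorithm, TensorInfo

# TensorInfo also records strides, the vectorized dimension, and components per element.
info = TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1)
algo = Algorithm(6, 1, inputs=[info], outputs=[info])
assert algo == Algorithm(6, 1, inputs=[info], outputs=[info])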
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/trt/test_config.py b/tools/Polygraphy/tests/backend/trt/test_config.py index 7800ce16..f6b97788 100644 --- a/tools/Polygraphy/tests/backend/trt/test_config.py +++ b/tools/Polygraphy/tests/backend/trt/test_config.py @@ -35,6 +35,8 @@ def test_defaults(self, identity_builder_network): assert not config.get_flag(trt.BuilderFlag.INT8) if mod.version(trt.__version__) >= mod.version("8.6"): assert not config.get_flag(trt.BuilderFlag.FP8) + assert not config.get_flag(trt.BuilderFlag.VERSION_COMPATIBLE) + assert not config.get_flag(trt.BuilderFlag.EXCLUDE_LEAN_RUNTIME) assert config.num_optimization_profiles == 1 assert config.int8_calibrator is None with contextlib.suppress(AttributeError): @@ -86,6 +88,20 @@ def test_precision_constraints(self, identity_builder_network, flag): else: assert not obey_set and not prefer_set + @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.6"), reason="Unsupported before TRT 8.6") + @pytest.mark.parametrize( + "kwargs, expected_flag", + [ + ({"version_compatible": True}, "VERSION_COMPATIBLE"), + ({"version_compatible": True, "exclude_lean_runtime": True}, "EXCLUDE_LEAN_RUNTIME"), + ], + ) + def test_version_compatibility_flags(self, identity_builder_network, kwargs, expected_flag): + builder, network = identity_builder_network + loader = CreateConfig(**kwargs) + with loader(builder, network) as config: + assert config.get_flag(getattr(trt.BuilderFlag, expected_flag)) + @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.2"), reason="Unsupported before TRT 8.2") def test_direct_io(self, identity_builder_network): builder, network = identity_builder_network @@ -347,7 +363,7 @@ def test_preview_features(self, identity_builder_network, preview_features): @pytest.mark.skipif( mod.version(trt.__version__) < mod.version("8.6"), reason="Unsupported for TRT versions prior to 8.6" ) - @pytest.mark.parametrize("level", range(5)) + @pytest.mark.parametrize("level", range(6)) def test_builder_optimization_level(self, identity_builder_network, level): builder, network = identity_builder_network loader = CreateConfig(builder_optimization_level=level) @@ -369,6 +385,16 @@ def test_hardware_compatibility_level(self, identity_builder_network, level): with loader(builder, network) as config: assert config.hardware_compatibility_level == level + @pytest.mark.skipif( + mod.version(trt.__version__) < mod.version("8.6"), reason="Unsupported for TRT versions prior to 8.6" + ) + @pytest.mark.parametrize("num_streams", range(3)) + def test_max_aux_streams(self, identity_builder_network, num_streams): + builder, network = identity_builder_network + loader = CreateConfig(max_aux_streams=num_streams) + with loader(builder, network) as config: + assert config.max_aux_streams == num_streams + class TestPostprocessConfig: def test_with_config(self, identity_builder_network): diff --git a/tools/Polygraphy/tests/backend/trt/test_loader.py b/tools/Polygraphy/tests/backend/trt/test_loader.py index 29be9fcd..ce1d0d78 100644 --- a/tools/Polygraphy/tests/backend/trt/test_loader.py +++ b/tools/Polygraphy/tests/backend/trt/test_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
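The new builder-config options covered by these tests can be combined roughly as follows (a sketch; all three options assume TensorRT 8.6 or newer):

from polygraphy.backend.trt import CreateConfig

create_config = CreateConfig(
    version_compatible=True,    # sets trt.BuilderFlag.VERSION_COMPATIBLE
    exclude_lean_runtime=True,  # sets trt.BuilderFlag.EXCLUDE_LEAN_RUNTIME
    max_aux_streams=2,          # forwarded to config.max_aux_streams
)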
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,7 @@ import numpy as np import pytest import tensorrt as trt + from polygraphy import constants, mod, util from polygraphy.backend.trt import ( Calibrator, @@ -27,24 +28,26 @@ EngineFromBytes, EngineFromNetwork, LoadPlugins, + LoadRuntime, ModifyNetworkOutputs, NetworkFromOnnxBytes, Profile, SaveEngine, bytes_from_engine, + create_config, + create_network, engine_from_network, + get_trt_logger, modify_network_outputs, network_from_onnx_bytes, network_from_onnx_path, onnx_like_from_network, - set_layer_precisions, - create_config, postprocess_network, + set_layer_precisions, set_tensor_datatypes, set_tensor_formats, - create_network, ) -from polygraphy.common.struct import MetadataTuple, BoundedShape +from polygraphy.common.struct import BoundedShape, MetadataTuple from polygraphy.comparator import DataLoader from polygraphy.exception import PolygraphyException from tests.helper import get_file_size, is_file_non_empty @@ -63,6 +66,16 @@ def identity_engine(): yield engine +@pytest.fixture(scope="session") +def identity_vc_engine_bytes(): + flags = [trt.OnnxParserFlag.NATIVE_INSTANCENORM] + config = CreateConfig(version_compatible=True) + network_loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader, flags=flags) + engine_loader = EngineBytesFromNetwork(network_loader, config=config) + with engine_loader() as engine_bytes: + yield engine_bytes + + @pytest.fixture(scope="session") def identity_builder_network(): builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader) @@ -136,6 +149,37 @@ def test_serialized_engine_loader_from_buffer(self, identity_engine): with loader() as engine: assert isinstance(engine, trt.ICudaEngine) + def test_serialized_engine_loader_custom_runtime(self, identity_engine): + with identity_engine.serialize() as buffer: + loader = EngineFromBytes(buffer, runtime=trt.Runtime(get_trt_logger())) + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) + + +@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.6"), reason="API was added in TRT 8.6") +class TestLoadRuntime: + def test_load_lean_runtime(self, nvinfer_lean_path): + loader = LoadRuntime(nvinfer_lean_path) + with loader() as runtime: + assert isinstance(runtime, trt.Runtime) + + +@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.6"), reason="API was added in TRT 8.6") +class TestSerializedVCEngineLoader: + def test_serialized_vc_engine_loader_from_lambda(self, identity_vc_engine_bytes): + with util.NamedTemporaryFile() as outpath: + with open(outpath.name, "wb") as f: + f.write(identity_vc_engine_bytes) + + loader = EngineFromBytes(lambda: open(outpath.name, "rb").read()) + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) + + def test_serialized_engine_loader_from_buffer(self, identity_vc_engine_bytes): + loader = EngineFromBytes(identity_vc_engine_bytes) + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) + class TestOnnxNetworkLoader: def test_loader(self): @@ -272,13 +316,6 @@ def test_basic(self, modifiable_network): assert network[0].precision == trt.float16 assert network[1].precision == trt.int8 - def test_non_existent_layer(self, modifiable_network): - with pytest.raises(PolygraphyException, match="The following layers were not found"): - set_layer_precisions( - modifiable_network, - layer_precisions={"fake_layer": trt.float16}, - ) - class TestSetTensorDatatypes: def 
test_basic(self, modifiable_network): @@ -296,13 +333,6 @@ def test_basic(self, modifiable_network): assert network[1].get_input(0).dtype == trt.float32 assert network[1].get_output(0).dtype == trt.float16 - def test_non_existent_tensor(self, modifiable_network): - with pytest.raises(PolygraphyException, match="The following tensors were not found"): - set_tensor_datatypes( - modifiable_network, - tensor_datatypes={"fake_tensor": trt.float16}, - ) - class TestSetTensorFormats: def test_basic(self, modifiable_network): @@ -319,13 +349,6 @@ def test_basic(self, modifiable_network): ) assert network[1].get_output(0).allowed_formats == 1 << int(trt.TensorFormat.HWC8) - def test_non_existent_tensor(self, modifiable_network): - with pytest.raises(PolygraphyException, match="The following tensors were not found"): - set_tensor_formats( - modifiable_network, - tensor_formats={"fake_tensor": trt.float16}, - ) - class TestEngineBytesFromNetwork: def test_can_build(self, identity_network): @@ -341,14 +364,20 @@ def test_defaults(self, identity_network): def test_can_build_with_parser_owning(self, identity_network): loader = EngineFromNetwork(identity_network) - with loader(): - pass + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) def test_can_build_without_parser_non_owning(self, identity_builder_network): builder, network = identity_builder_network loader = EngineFromNetwork((builder, network)) - with loader(): - pass + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) + + def test_custom_runtime(self, identity_builder_network): + builder, network = identity_builder_network + loader = EngineFromNetwork((builder, network), runtime=trt.Runtime(get_trt_logger())) + with loader() as engine: + assert isinstance(engine, trt.ICudaEngine) @pytest.mark.parametrize("use_config_loader, set_calib_profile", [(True, None), (False, False), (False, True)]) def test_can_build_with_calibrator(self, identity_builder_network, use_config_loader, set_calib_profile): diff --git a/tools/Polygraphy/tests/backend/trt/test_profile.py b/tools/Polygraphy/tests/backend/trt/test_profile.py index 72353b9d..2a8d5ad6 100644 --- a/tools/Polygraphy/tests/backend/trt/test_profile.py +++ b/tools/Polygraphy/tests/backend/trt/test_profile.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/backend/trt/test_runner.py b/tools/Polygraphy/tests/backend/trt/test_runner.py index 608f3803..73dfcc39 100644 --- a/tools/Polygraphy/tests/backend/trt/test_runner.py +++ b/tools/Polygraphy/tests/backend/trt/test_runner.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
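Putting the pieces above together, a version-compatible engine can be built and later deserialized against a lean runtime roughly as follows (a sketch assuming TensorRT 8.6+; the model and library paths are placeholders):

import tensorrt as trt
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.trt import (
    CreateConfig,
    EngineBytesFromNetwork,
    EngineFromBytes,
    LoadRuntime,
    NetworkFromOnnxBytes,
)

network = NetworkFromOnnxBytes(
    BytesFromPath("model.onnx"), flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]
)
build_engine = EngineBytesFromNetwork(network, config=CreateConfig(version_compatible=True))

# LoadRuntime points TensorRT at an explicit (e.g. lean) runtime library.
lean_runtime = LoadRuntime("/path/to/libnvinfer_lean.so")()
engine = EngineFromBytes(build_engine, runtime=lean_runtime)()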
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,7 @@ import numpy as np import pytest import tensorrt as trt + from polygraphy import cuda, mod from polygraphy.backend.trt import ( CreateConfig, @@ -31,8 +32,8 @@ ) from polygraphy.exception import PolygraphyException from polygraphy.logger import G_LOGGER -from tests.models.meta import ONNX_MODELS from tests.helper import time_func +from tests.models.meta import ONNX_MODELS class TestLoggerCallbacks: @@ -66,6 +67,12 @@ def test_basic(self): assert runner.last_inference_time() is not None assert not runner.is_active + @pytest.mark.serial + def test_warn_if_impl_methods_called(self, check_warnings_on_runner_impl_methods): + model = ONNX_MODELS["identity"] + runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(model.loader))) + check_warnings_on_runner_impl_methods(runner) + @pytest.mark.skipif( mod.version(trt.__version__) <= mod.version("8.5.0.9"), reason="Unsupported for TRT 8.4 and older" ) @@ -290,3 +297,32 @@ def runner_infer(): print(f"Absolute difference: {runner_time - native_time:.5g}") print(f"Relative difference: {runner_time / native_time:.5g}") assert (runner_time - native_time) < 1e-3 or runner_time <= (native_time * 1.10) + + @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.5"), reason="Unsupported before TRT 8.5") + @pytest.mark.parametrize("hwc_input", [True, False], ids=["hwc_input", "chw_input"]) + @pytest.mark.parametrize("hwc_output", [True, False], ids=["hwc_output", "chw_output"]) + def test_infer_chw_format(self, hwc_input, hwc_output): + model = ONNX_MODELS["identity_multi_ch"] + inp_shape = model.input_metadata["x"].shape + builder, network, parser = network_from_onnx_bytes(model.loader) + + formats = 1 << int(trt.TensorFormat.HWC) + if hwc_input: + network.get_input(0).allowed_formats = formats + if hwc_output: + network.get_output(0).allowed_formats = formats + + engine = engine_from_network((builder, network)) + + with TrtRunner(engine) as runner: + inp = np.random.normal(size=(inp_shape)).astype(np.float32) + if hwc_input: + inp = inp.transpose(0, 2, 3, 1) + + outputs = runner.infer({"x": inp}) + if hwc_input == hwc_output: # output in CHW/HWC format and similarly shaped + assert np.allclose(outputs["y"], inp) + elif not hwc_input and hwc_output: # output in HWC format and shaped (N, H, W, C) + assert np.allclose(outputs["y"].transpose(0, 3, 1, 2), inp) + else: # hwc_input and not hwc_output: output in CHW format and shaped (N, C, H, W) + assert np.allclose(outputs["y"].transpose(0, 2, 3, 1), inp) diff --git a/tools/Polygraphy/tests/backend/trt/test_util.py b/tools/Polygraphy/tests/backend/trt/test_util.py index 9daf73c8..55509348 100644 --- a/tools/Polygraphy/tests/backend/trt/test_util.py +++ b/tools/Polygraphy/tests/backend/trt/test_util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,7 +60,7 @@ def add_default_preview_features_after_8_6(expected): if "Preview Features" not in expected: expected = ( dedent(expected).strip() - + "\nPreview Features | [DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]" + + "\nPreview Features | [FASTER_DYNAMIC_SHAPES_0805, DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]" ) return expected diff --git a/tools/Polygraphy/tests/common/test_interface.py b/tools/Polygraphy/tests/common/test_interface.py index bdcde290..72d731b7 100644 --- a/tools/Polygraphy/tests/common/test_interface.py +++ b/tools/Polygraphy/tests/common/test_interface.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/common/test_struct.py b/tools/Polygraphy/tests/common/test_struct.py index 680ef0f3..1eb1614d 100644 --- a/tools/Polygraphy/tests/common/test_struct.py +++ b/tools/Polygraphy/tests/common/test_struct.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/comparator/test_comparator.py b/tools/Polygraphy/tests/comparator/test_comparator.py index 78ecc467..7fc46308 100644 --- a/tools/Polygraphy/tests/comparator/test_comparator.py +++ b/tools/Polygraphy/tests/comparator/test_comparator.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/comparator/test_compare.py b/tools/Polygraphy/tests/comparator/test_compare.py index 20603b3e..18c49cdc 100644 --- a/tools/Polygraphy/tests/comparator/test_compare.py +++ b/tools/Polygraphy/tests/comparator/test_compare.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,6 +26,7 @@ class TestSimpleCompareFunc: @pytest.mark.parametrize( "values0, values1, dtype, expected_max_absdiff, expected_max_reldiff", [ + # Low precision arrays should be casted to higher precisions to avoid overflows/underflows. ([0], [1], np.uint8, 1, 1.0), ([1], [0], np.uint8, 1, np.inf), ([0], [1], np.uint16, 1, 1.0), @@ -34,10 +35,10 @@ class TestSimpleCompareFunc: ([1], [0], np.uint32, 1, np.inf), ([25], [30], np.int8, 5, 5.0 / 30.0), ([25], [30], np.float16, 5, np.array([5.0], dtype=np.float32) / np.array([30.0], dtype=np.float32)), + ([1], [0], np.float32, 1, 1 / np.finfo(float).eps), ], ) - # Low precision arrays should be casted to higher precisions to avoid overflows/underflows. 
- def test_low_precision_comparison(self, values0, values1, dtype, expected_max_absdiff, expected_max_reldiff): + def test_comparison(self, values0, values1, dtype, expected_max_absdiff, expected_max_reldiff): iter_result0 = IterationResult(outputs={"output": np.array(values0, dtype=dtype)}) iter_result1 = IterationResult(outputs={"output": np.array(values1, dtype=dtype)}) diff --git a/tools/Polygraphy/tests/comparator/test_data_loader.py b/tools/Polygraphy/tests/comparator/test_data_loader.py index bdb6a795..37de4f9a 100644 --- a/tools/Polygraphy/tests/comparator/test_data_loader.py +++ b/tools/Polygraphy/tests/comparator/test_data_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/comparator/test_postprocess.py b/tools/Polygraphy/tests/comparator/test_postprocess.py index b75d6cb0..6f541d34 100644 --- a/tools/Polygraphy/tests/comparator/test_postprocess.py +++ b/tools/Polygraphy/tests/comparator/test_postprocess.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/comparator/test_struct.py b/tools/Polygraphy/tests/comparator/test_struct.py index 5af560cf..57847cae 100644 --- a/tools/Polygraphy/tests/comparator/test_struct.py +++ b/tools/Polygraphy/tests/comparator/test_struct.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/conftest.py b/tools/Polygraphy/tests/conftest.py index 861fb07e..25f8e9bb 100644 --- a/tools/Polygraphy/tests/conftest.py +++ b/tools/Polygraphy/tests/conftest.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,8 @@ import glob import os import subprocess as sp +import sys +import ctypes.util import pytest @@ -64,3 +66,96 @@ def run_impl(command, cwd=None): return status return run_impl + + +@pytest.fixture() +def check_warnings_on_runner_impl_methods(): + """ + Fixture that ensures warnings are emitted when `_impl` methods of runners are called. 
+ """ + + def check(runner): + import contextlib + import io + + import numpy as np + + outfile = io.StringIO() + with contextlib.redirect_stdout(outfile), contextlib.redirect_stderr(outfile): + runner.activate() + metadata = runner.get_input_metadata() + runner.infer({name: np.ones(shape, dtype=dtype) for name, (dtype, shape) in metadata.items()}) + runner.deactivate() + + outfile.seek(0) + out = outfile.read() + + def check_warning(method, warning_expected): + assert ( + f"Calling '{type(runner).__name__}.{method}_impl()' directly is not recommended. Please use '{method}()' instead." + in out + ) == warning_expected + + check_warning("get_input_metadata", warning_expected=False) + check_warning("activate", warning_expected=False) + check_warning("infer", warning_expected=False) + check_warning("deactivate", warning_expected=False) + + runner.activate_impl() + metadata = runner.get_input_metadata_impl() + runner.infer_impl({name: np.ones(shape, dtype=dtype) for name, (dtype, shape) in metadata.items()}) + runner.deactivate_impl() + + outfile.seek(0) + out = outfile.read() + print(out) + + check_warning("get_input_metadata", warning_expected=True) + check_warning("activate", warning_expected=True) + check_warning("infer", warning_expected=True) + check_warning("deactivate", warning_expected=True) + + return check + + +@pytest.fixture() +def check_warnings_on_loader_impl_methods(): + """ + Fixture that ensures warnings are emitted when loader `_impl` methods are called. + """ + + def check(loader): + import contextlib + import io + + outfile = io.StringIO() + with contextlib.redirect_stdout(outfile), contextlib.redirect_stderr(outfile): + warning_msg = f"Calling '{type(loader).__name__}.call_impl()' directly is not recommended. Please use '__call__()' instead." + loader.__call__() + + outfile.seek(0) + out = outfile.read() + + assert warning_msg not in out + + loader.call_impl() + + outfile.seek(0) + out = outfile.read() + print(out) + + assert warning_msg in out + + return check + + +@pytest.fixture() +@pytest.mark.skipif(sys.platform.startswith("win"), reason="Fixture has not been updated to work on Windows") +def nvinfer_lean_path(): + lean_library_name = ctypes.util.find_library("nvinfer_lean") + for dirname in os.environ.get("LD_LIBRARY_PATH", "").split(os.path.pathsep): + path = os.path.join(dirname, lean_library_name) + if os.path.exists(path): + return path + + assert False, "Could not find nvinfer_lean!" diff --git a/tools/Polygraphy/tests/cuda/test_cuda.py b/tools/Polygraphy/tests/cuda/test_cuda.py index 8966991d..a7ad8ccb 100644 --- a/tools/Polygraphy/tests/cuda/test_cuda.py +++ b/tools/Polygraphy/tests/cuda/test_cuda.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/func/test_func.py b/tools/Polygraphy/tests/func/test_func.py index 84e7b5e1..3685855a 100644 --- a/tools/Polygraphy/tests/func/test_func.py +++ b/tools/Polygraphy/tests/func/test_func.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/helper.py b/tools/Polygraphy/tests/helper.py index a8b7c7c3..10f309a7 100644 --- a/tools/Polygraphy/tests/helper.py +++ b/tools/Polygraphy/tests/helper.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,6 @@ import os import time -import tensorrt as trt -from polygraphy.backend.trt import get_trt_logger - ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir)) # Use bin/polygraphy for any invocations of Polygraphy that don't use the script_runner fixture. @@ -66,6 +63,10 @@ def time_func(func, warm_up=25, iters=100): def has_dla(): global HAS_DLA if HAS_DLA is None: + import tensorrt as trt + + from polygraphy.backend.trt import get_trt_logger + builder = trt.Builder(get_trt_logger()) try: diff --git a/tools/Polygraphy/tests/logger/test_logger.py b/tools/Polygraphy/tests/logger/test_logger.py index f4b80c6d..e7652829 100644 --- a/tools/Polygraphy/tests/logger/test_logger.py +++ b/tools/Polygraphy/tests/logger/test_logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/mod/test_dependencies.py b/tools/Polygraphy/tests/mod/test_dependencies.py index e6887477..6894b56e 100644 --- a/tools/Polygraphy/tests/mod/test_dependencies.py +++ b/tools/Polygraphy/tests/mod/test_dependencies.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -253,6 +253,8 @@ def test_all_lazy_imports(self): # new dependency. expected = [ "fcntl", + "matplotlib.pyplot", + "matplotlib", "msvcrt", "numpy", "onnx_graphsurgeon", diff --git a/tools/Polygraphy/tests/mod/test_exporter.py b/tools/Polygraphy/tests/mod/test_exporter.py index 9645485a..624a84e6 100644 --- a/tools/Polygraphy/tests/mod/test_exporter.py +++ b/tools/Polygraphy/tests/mod/test_exporter.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/mod/test_importer.py b/tools/Polygraphy/tests/mod/test_importer.py index 908c6a7c..b1821f07 100644 --- a/tools/Polygraphy/tests/mod/test_importer.py +++ b/tools/Polygraphy/tests/mod/test_importer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/mod/test_util.py b/tools/Polygraphy/tests/mod/test_util.py new file mode 100644 index 00000000..5798275d --- /dev/null +++ b/tools/Polygraphy/tests/mod/test_util.py @@ -0,0 +1,37 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +from polygraphy import mod + + +@pytest.mark.parametrize( + "ver0, ver1, expected", + [ + ("1.0.0", "2.0.0", False), + ("1.0.0", "0.9.0", True), + ("0.0b1", "0.1b1", False), + ("0.1b1", "0.1b0", True), + ("0.12b1", "0.1b0", True), + ("0.1b0", "0.1a0", True), + ("0.1rc0", "0.1b0", True), + ("0.post1", "0.post0", True), + ("0.post1", "0.post2", False), + ], +) +def test_version(ver0, ver1, expected): + assert (mod.version(ver0) > mod.version(ver1)) == expected diff --git a/tools/Polygraphy/tests/models/I.onnx b/tools/Polygraphy/tests/models/I.onnx new file mode 100644 index 0000000000000000000000000000000000000000..3b2fe79fe8b01fed6f801dc366499bdddd3ed184 GIT binary patch literal 30743 zcmai+O|EQ55rl2P=;<;+55&I!v4ZZZ>_3Lg*Z@LIkP((KLeIz;1S?>>EP<$#ky4i` zx?_zN{Hu4r{qtuZzW(Ct z|GfR+?T8%lw#3^J?@PQN@mS(<#4k(ya>TDo{CdQ1OZ;}k?@RoC#2-)bA0E&Dc>l@x zyG~ESkKQtr%JZL0<($g%A5EpD^89C0>8U*b;Z!1(=RcjwC6(ttp2{_q=RcpyEtO{g zRPL!fBcPHD+RC7vj-0kKXvv_h3|ca1D}$B{+RC6MgSIkg$)K$aS~6%WgO&{1%Ah5K zwlZkRpsx&iGUzLVo(%fRpeKXAGU&;muMB!J=qrPs4EoBTCxgB+=*gh3406#&xF&<^ z%HWy|t}BCUGPte`uF2rKGPov#>&oDo46ZAKYcja546e!Gx-z&XgX_xRnhb6$gIhAV ztqg9-;I=ZjC4<|_;Fb(-D}!4yxUCFs$>6p!xFv(z%HWm^ZYzUZGPtb_Zpq-jGPoy$ z`^w;+4DKs~dosAM4DQL`zB0HcgZs+ho(%3QgL^W#uMF6avcqD_z%HWX<9xH=KGI*>E9?9UbGI%6|$I9T53?3_kM>2RkGx+i0gdUIg z-#$FjGlZYKF-GkfLuqGf&mc-`)Sgk4)~P+iC@rWx<0$Py?HNdES8C5lO1n{ehEm#{ z+B25Y$YnB*)Xt~OI}0vnaybhwXL30UE@yH%3od7JISVdlaybhwXL30UE@yH%3od7J zISVdla+wSzxxi5#`)0C~rIE{IDoZ1m$yWX>_aJ;_){I}heevO6pZ@KOKmYQ}zrR%` z$xP<5G?JO@WoaZc8O+j1X0n*2k<4T=OCy=dW>SOCJpPKwXqHAYlhrJZWG1s&8p%v{ zvovy<3}LEsb0zBU&1{Ojfisa+%C%Y2-55QEKp`2bal^mPRgd@V5t-$>5eoE|bMA zja(*^TN=4cHn%i#nT&2}^(#T~pyQPuKWOqv=m&x#!mR#Ctb-aijT-s@Mc8F11 zon1R6mv&m69b(K|on1R6mv&m69b(K|on1R6mv&m69b(K|on1R6mv&m69b%a$9B-rJ zZNl*)#yr7=j<*TNix~3+7dqZ194}(b6I|$cn{d2{F;8%z<88w6B9?i=@isc%CLAwf z%oAKNM;-tEwaM}3{zQzL;DT9d=864+X=-YM3+Ab*iT#3!YHETDW~!-){er3Lp$Wg+ z=y#j&yNEGQaKUUfYl?r5FkMYeaKU^vHNgcF*3<+S%ve(sTrg!#O>n`SH8sHnlh#8M zez(!@HsN;>W1irGd27}bTrhD>O>n`?H8sHnQ`gi47tCE#6I?KPO-*pY>@_vP1=H6< z6MnbR?>6Cg5o4aZp5TI6ZPpZAFs)5ZaKXGbHNgcF+tdUX%xqH=TrjmwO>n{7HZ{Qo zliNcRez(!@HsN;>W1irG`EAw|Trj~+O>n^sH#NZpQ{2=97tC=}6I?LKO-*pYEH^d5 z1=HL^6MnbR?>6Cg5o4a1Q*P9QxjY;-Azq!!F)G0 z!G#mvF8nTHM_w0x7cpupmrlRih2KStc`KJrzuSf1MT~hXmrlRih2KStc`KJrzuSf1 zMJ)4#-|h6fUHDzZm?yZ1k12`==zUCeh8W1iqbzuU!p7qQF}ez()_cHwss zW1iqbzuU!p7cu4uF7&%y%y$uEp5Q{i+r@krG3E&_^t)ZmcM;1x;deXzZWn$RG3E&_ z^t)ZmcM)Tr;6lIK#e5er<_RwJyIst85o4a66I6MnbT?{?vL5o4a1k12`==zUCeh8W1iqbzuU!p7cu4uF7&%y%y$vXJmGgc{cab27cu4uF7&%y%y$uE 
zp5Q{i+r@krG3E&_^t)ZmcM)Tr;6lIK#e5gB%oBdM)9-fScM)Tr;6lIK#e5er<_RwJ zyIst85o4a1k12`==zUCeh8%RJ$CJN<4Keit$32`==zUCeh8W1iqbzuU!p z7cu4uF7&%y%y$uEp5Q{i+r@kru_G^p-$jhtI^PZY-8ijW5M$oj2L%0YoK`M~F>mby zf_^tnD;LC=xAp-+zZ<8O3u4S$`+%U|4dHhY%RJ$CgMK%}i3Y@&C%Dk>hB(oH81n=d z`rQyG8W3Zi;6lF};zR>t%oAMbcSHDH#4=C#-JstMaiRe+<_RwJyD=S=&Dk$v%oAMb zcSD?KK#X~U3;k}0^9+bFPjI2%4dHhY%RJ$CgMK%}c?QIoC%Dk>hB(iF81n=d`rQ!c z84zQh;6lF};yeRl%oAMbcSHDH#4=C#-JstMah?G&<_RwJyCKdqAjUkwg?=~0c?QIo zC%Dk>hB(iF81n=d`rQzI7qQF}emCfML!4(ojCq0!{cec!42UsLaG~D~ah?G&<_RwJ zyCKdqAjUkwg?=}L-$g9*gx?MN-4N#)5M!R;Lcbg0JOg6P6I|$bL!4(ojCq0!{cec! z42UsLaG~D~;dc?sJmGhPemBH<2E>>rxX|y0IM09>^8^?A-4N#)5M!R;Lcbg0JOg6P z6I|$bL-<|9GEey3px+H~o&hoD2`==zA>rxX|y0IM09>^8^?A z-4K2kvCI>GH|TdmoM%9cd4dc5Ziw>?h%rxaq2CR0o&hoD2`==zA(6;F>_r9Y&E$8ZhtV%azZ(5!^t;g?FZEy4^|DuhxZvz6yhg|h zuNiW|YlvL%nj#mx#>fS)IdZ{kkX+zM!hr8d!iAd!jwJlJso+S$nVSlZBs{vQ;7G!) z?<>Hvn+1*}%)6=JNW#XO3XUWUy{X_x!rGe(jwDR}z5<-SS>Q;*^P38eB;3EL;7Fnm zFcln0bOiefXba2&M-mN!so+SWRWKDCNi+?nf+LCc!BlW0(MaqopqnrY97*&Qrh+4h z4#QM%B++Y_3XUYYj(r8R9%g|fi6+EUa3s-=mS*Zu+09VVQ0^ zAlBHlK()f|(@hgpHS9j!v_VzF?$b>pR5k2A-Lyhg!|u~fGsGI3Zm3q+eY)v~s)pUC zn~tbz*nPU`iK>R(r<<;bH8yQgt+4xa(->6^yH7W*QPr^fbkiJF4ZBY_?NQaR`*hPF zvBsuDsugyhZhEAuVfX2#OR5@npKki3s$uu(rc+{#O{-KZ>^|KzOI5?}(@ncnHS9j! zG)z^)?$b@nR5k2A-84h25u{zNu>1eY)wKs)pUCo8GBv*nPU`o>*hkKGh1l zPd5!z)v)_?(?V4ZyH7VwRMoKibkjyv4YyCnP1(JJ9rtD33b#+k-C0+|?bC6O*41$P zblj9UEK{7+ay2YdoZ50VEK}P@jeEN0S8$rk zt+4wPC%aq?%M_=)Tn)<&1Zn60l+ebB=LvuAOQ=Cb2 zH7rw{PjfXaQ=C;ZHEbU>?j^e#cAwfls^KJ?`xVO+r`lW%%M>TvTn)?A_EFU*(b2TheoP~2WEK{6|b2TheoRKp%Y#%l5L%SMwpV~gE;RK!g70VQ->0AxV6esIk z4a?N_QR6PP`4!toHJrP1H7rw{!E-e%Q=G?hH7rw{&D+-)H?Z9b$8_Arb~PN+aWmW1 za7@Q7ZCArF9XGaJ4a?N_QR5D``4!x3aVsoS+eeMN-R@T`Q~N%u+dgXC`8LmC`>1jM z+tsj4?fa;1`>1g*+&zb7YTrk7+eeN2;^sMQA2sfdyBe0M?W4MVA2sfjyXUY>Z6DR` z`>1i(+&qWvqsF~+SHm*3eN?x7)VPoCp2IS=@1wfyqsBdT^BlI18h6%R4a?N_QQf|e z8h6>eIGUM^zWZz-0XKN9MiCU)VS;K ze#P}skHdQjjtED=des|A2q&bV15N(HE=8Je@|^66}FEWUpsKmVgGw-`>3#e)c6X5 zc@Eo0jV~g&8kVW;qr&!4<4XzdIV@A#M}_U9#upUKbJ#v=d`-dCuuN?q6}FEWUs-U^ zVVT-KDr_G$zP@0d!}d|*OAM}tWorAVuzl3{LW6q_%hdK!Vf(1@Pnc6-oY#%kg`e2^J_EF;t5Uz%0YWt|Lebo3egnJIl)b>&Fx{vzJ Lx86Sb?sxtVSExI2 literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/tests/models/identity_multi_ch.onnx b/tools/Polygraphy/tests/models/identity_multi_ch.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8d8cc293ac9826616029349e89fbf2229498d6ca GIT binary patch literal 93 zcmdN*pO>GK8gHn?;R#jAmBWRmONh6)ASbf~Vnn!NtSDD8#|V h#K8i@tVv>Mo-`E#i8CgNB0C4@JBY9o3m1a`Hvs$*Y;gbp literal 0 HcmV?d00001 diff --git a/tools/Polygraphy/tests/models/unbounded_dds.onnx b/tools/Polygraphy/tests/models/unbounded_dds.onnx new file mode 100644 index 0000000000000000000000000000000000000000..259c4b5c00e8462f9a1665378b570e618a452545 GIT binary patch literal 1317 zcmeHH%Wl&^6!q8+p4`x44X?-ofxJQT%9E7Rb^%c;1Xw~s*&wkQnPi$oO~;Wvj=JP4 zSm$H-jQ&8sqKsduan!J2%Z#pO?)c30@jXYzm}ri6@)KZfko|Ea5*Z4~qEs^9=|)0a zvS}PHXGt1Qc_f&KCY(_hpO#4PT8GvuT%_E)(y>uY8x7lE`FuK)3G;(rx~Bbj_douv z2Ww=)yrEF;JfWj;ynO- z&iQgOzeu{;8tL#Bwk|6+VN$sD(Ahi9_A+8~ArB7}DV2I{&}G!MvioCrw(V#2HHG8f zD4LsEJCR4E6Nh5TbNJf4OBzUo+4PuN&BUq^bS4IK(VyQ(`{P$AHwr&Yq+z+0oKa# zSL{)MQH6bvO;ve!H-eWHyjpL{QRUxWl#V7(;1Nc|1|1_6h6>2y`$N_K+h!0m0}M^j G?7iQB5ow 0 + for action in arg_group.arg_group.group._group_actions: + assert action.default is None diff --git a/tools/Polygraphy/tests/tools/args/comparator/test_data_loader.py b/tools/Polygraphy/tests/tools/args/comparator/test_data_loader.py index 7ef6324f..b6e37824 100644 --- 
a/tools/Polygraphy/tests/tools/args/comparator/test_data_loader.py +++ b/tools/Polygraphy/tests/tools/args/comparator/test_data_loader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/args/helper.py b/tools/Polygraphy/tests/tools/args/helper.py index 7638cc24..4e9225e0 100644 --- a/tools/Polygraphy/tests/tools/args/helper.py +++ b/tools/Polygraphy/tests/tools/args/helper.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/args/logger/test_logger.py b/tools/Polygraphy/tests/tools/args/logger/test_logger.py index aa898415..96c4e464 100644 --- a/tools/Polygraphy/tests/tools/args/logger/test_logger.py +++ b/tools/Polygraphy/tests/tools/args/logger/test_logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/args/test_docstrings.py b/tools/Polygraphy/tests/tools/args/test_docstrings.py index d2e266f2..671a6557 100644 --- a/tools/Polygraphy/tests/tools/args/test_docstrings.py +++ b/tools/Polygraphy/tests/tools/args/test_docstrings.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ ARG_CLASSES = [cls for cls in args_mod.__dict__.values() if inspect.isclass(cls) and issubclass(cls, BaseArgs)] USES_DEP_PAT = re.compile(r"self.arg_groups\[(.*?)\]") -MEMBER_PAT = re.compile(r"self.(.*?)[ ,.\[]") +MEMBER_PAT = re.compile(r"self.(.*?)[ ,.\[}]") class TestDocStrings: @@ -63,7 +63,7 @@ def test_docstrings_document_dependencies(self, arg_group_type): # and doesn't use most members of the class, so this approach is generally ok. # # There are cases where we may not want to document some members, e.g. if they are deprecated. - # In those cases, you can prefix the member with a `_` and it will be ignored by + # In those cases, you can prefix the member with a `_` and it will be ignored by this test. @pytest.mark.parametrize("arg_group_type", ARG_CLASSES) def test_parse_docstring_documents_populated_members(self, arg_group_type): code = inspect.getsource(arg_group_type.parse_impl) diff --git a/tools/Polygraphy/tests/tools/args/test_model.py b/tools/Polygraphy/tests/tools/args/test_model.py index 6f5ccefa..c32231b7 100644 --- a/tools/Polygraphy/tests/tools/args/test_model.py +++ b/tools/Polygraphy/tests/tools/args/test_model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,7 +36,7 @@ def test_path(self, group): group.parse_args(["model.onnx"]) - assert group.path == os.path.abspath("model.onnx") + assert group.path == "model.onnx" assert group.model_type.is_onnx() def test_input_shapes(self, group): @@ -68,5 +68,5 @@ def test_fixed_model_type(self): def test_model_with_extra_info(self, group, arg, expected_model, expected_extra_info): group.parse_args([arg]) - assert group.path == os.path.abspath(expected_model) + assert group.path == expected_model assert group.extra_model_info == expected_extra_info diff --git a/tools/Polygraphy/tests/tools/args/util/test_util.py b/tools/Polygraphy/tests/tools/args/util/test_util.py index ff0e3b19..23510580 100644 --- a/tools/Polygraphy/tests/tools/args/util/test_util.py +++ b/tools/Polygraphy/tests/tools/args/util/test_util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/conftest.py b/tools/Polygraphy/tests/tools/conftest.py index 7ba9d754..d0d79c2b 100644 --- a/tools/Polygraphy/tests/tools/conftest.py +++ b/tools/Polygraphy/tests/tools/conftest.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ import pytest import tensorrt as trt -from polygraphy.backend.trt import Algorithm, TacticReplayData +from polygraphy.backend.trt import Algorithm, TacticReplayData, TensorInfo from polygraphy.json import save_json @@ -60,7 +60,6 @@ def poly_fixture_impl(additional_opts: List[str] = [], expect_error: bool = Fals FakeAlgorithm.get_algorithm_io_info = lambda this, index: this.io_info[index] FakeAlgorithmVariant = namedtuple("FakeAlgorithmVariant", ["implementation", "tactic"]) -FakeAlgorithmIOInfo = namedtuple("FakeAlgorithmIOInfo", ["tensor_format", "dtype", "strides"]) @pytest.fixture(scope="session", params=["", "subdir"]) @@ -71,7 +70,11 @@ def fake_context(name, num_inputs=1, num_outputs=1): def fake_algo( implementation=6, tactic=0, num_io=2, tensor_format=trt.TensorFormat.LINEAR, dtype=trt.float32, strides=(1, 2) ): - io_info = [FakeAlgorithmIOInfo(tensor_format=tensor_format, dtype=dtype, strides=strides)] * num_io + io_info = [ + TensorInfo( + tensor_format=tensor_format, dtype=dtype, strides=strides, vectorized_dim=-1, components_per_element=1 + ) + ] * num_io return FakeAlgorithm(algorithm_variant=FakeAlgorithmVariant(implementation, tactic), io_info=io_info) def make_replay(tactic): @@ -100,7 +103,7 @@ def make_path(prefix, *args): [I] Loaded {num} bad tactic replays. 
[I] Found potentially bad tactics: [I] Layer: layer0 - Algorithms: ["(Implementation: 0, Tactic: 2) | Inputs: (('TensorFormat.LINEAR', 'DataType.FLOAT', '(1, 2)'),) | Outputs: (('TensorFormat.LINEAR', 'DataType.FLOAT', '(1, 2)'),)"] + Algorithms: ['(Implementation: 0, Tactic: 2) | Inputs: (TensorInfo(TensorFormat.LINEAR, DataType.FLOAT, (1, 2), -1, 1),) | Outputs: (TensorInfo(TensorFormat.LINEAR, DataType.FLOAT, (1, 2), -1, 1),)'] """ ) yield dir, EXPECTED_OUTPUT diff --git a/tools/Polygraphy/tests/tools/fake_reduce_checker.py b/tools/Polygraphy/tests/tools/fake_reduce_checker.py index 9ca770c7..f99085c9 100755 --- a/tools/Polygraphy/tests/tools/fake_reduce_checker.py +++ b/tools/Polygraphy/tests/tools/fake_reduce_checker.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_convert.py b/tools/Polygraphy/tests/tools/test_convert.py index c037aa0c..8b0d4ba2 100644 --- a/tools/Polygraphy/tests/tools/test_convert.py +++ b/tools/Polygraphy/tests/tools/test_convert.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_data.py b/tools/Polygraphy/tests/tools/test_data.py index 0f0c933a..09116c72 100644 --- a/tools/Polygraphy/tests/tools/test_data.py +++ b/tools/Polygraphy/tests/tools/test_data.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_debug.py b/tools/Polygraphy/tests/tools/test_debug.py index 04087ddc..7e0677ce 100644 --- a/tools/Polygraphy/tests/tools/test_debug.py +++ b/tools/Polygraphy/tests/tools/test_debug.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -492,6 +492,27 @@ def test_reduce_interactive(self, poly_debug, responses): ) assert "Did 'polygraphy_debug.onnx' [p]ass or [f]ail?" in status.stdout + def test_reduce_node_with_multiple_outputs_that_are_graph_outputs(self, poly_debug): + # Tests a working model where one of the nodes has multiple outputs which are graph outputs. 
+ with tempfile.TemporaryDirectory() as outdir: + status = poly_debug( + [ + "reduce", + ONNX_MODELS["multi_output"].path, + "--output=reduced.onnx", + "--no-reduce-outputs", + "--mode=linear", + "--check", + "polygraphy", + "run", + "polygraphy_debug.onnx", + "--onnxrt", + ], + cwd=outdir, + ) + + assert "FAILED" not in (status.stdout + status.stderr) + class TestRepeat: @pytest.mark.parametrize( diff --git a/tools/Polygraphy/tests/tools/test_deprecated.py b/tools/Polygraphy/tests/tools/test_deprecated.py index 111f4e24..779424de 100644 --- a/tools/Polygraphy/tests/tools/test_deprecated.py +++ b/tools/Polygraphy/tests/tools/test_deprecated.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_inspect.py b/tools/Polygraphy/tests/tools/test_inspect.py index 85c883d6..68e63fb7 100644 --- a/tools/Polygraphy/tests/tools/test_inspect.py +++ b/tools/Polygraphy/tests/tools/test_inspect.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -662,6 +662,11 @@ def test_onnx(self, case, poly_inspect): check_lines_match(actual, expected, should_check_line=lambda line: "Note: Error was:" not in line) + def test_list_unbounded_dds(self, poly_inspect): + cmd = ["model", ONNX_MODELS["unbounded_dds"].path, "--list-unbounded-dds", "--shape-inference"] + status = poly_inspect(cmd) + assert ("cast_out_6" in status.stdout) + @pytest.mark.parametrize("model", ["identity", "scan", "tensor_attr"]) def test_trt_sanity(self, run_inspect_model, model): import tensorrt as trt @@ -706,7 +711,10 @@ def test_trt_engine(self, case, dynamic_identity_engine, poly_inspect): check_lines_match( actual, expected, - should_check_line=lambda exline: "Tactic =" not in exline and "Device Memory" not in exline and "Origin" not in exline, + should_check_line=lambda exline: "Tactic =" not in exline + and "Device Memory" not in exline + and "Origin" not in exline + and "Reformat" not in exline, ) def test_tf_sanity(self, run_inspect_model): diff --git a/tools/Polygraphy/tests/tools/test_polygraphy.py b/tools/Polygraphy/tests/tools/test_polygraphy.py index 0262f56b..c3469484 100644 --- a/tools/Polygraphy/tests/tools/test_polygraphy.py +++ b/tools/Polygraphy/tests/tools/test_polygraphy.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_run.py b/tools/Polygraphy/tests/tools/test_run.py index 86d4958a..2d831fd2 100644 --- a/tools/Polygraphy/tests/tools/test_run.py +++ b/tools/Polygraphy/tests/tools/test_run.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_script.py b/tools/Polygraphy/tests/tools/test_script.py index edec9327..4dabb64a 100644 --- a/tools/Polygraphy/tests/tools/test_script.py +++ b/tools/Polygraphy/tests/tools/test_script.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/tools/test_surgeon.py b/tools/Polygraphy/tests/tools/test_surgeon.py index 2b7f2283..776463c9 100644 --- a/tools/Polygraphy/tests/tools/test_surgeon.py +++ b/tools/Polygraphy/tests/tools/test_surgeon.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -288,7 +288,7 @@ def test_fold_constants( no_onnxruntime_shape_inference, ): with util.NamedTemporaryFile() as outmodel: - cmd = ["sanitize", ONNX_MODELS["const_foldable"].path, "-o", outmodel.name, "--fold-constants"] + cmd = ["sanitize", ONNX_MODELS["const_foldable"].path, "-o", outmodel.name, "--fold-constants", "-v"] if fold_shapes: cmd += [fold_shapes] if partitioning: @@ -299,7 +299,7 @@ def test_fold_constants( cmd += [no_onnxruntime_shape_inference] status = poly_surgeon(cmd) - assert ("Inferring shapes in the model with `onnxruntime.tools.symbolic_shape_infer`" in status.stdout) == ( + assert ("Inferred shapes in the model with `onnxruntime.tools.symbolic_shape_infer`" in status.stdout) == ( no_onnxruntime_shape_inference is None ) @@ -307,6 +307,45 @@ def test_fold_constants( model = onnx.load(outmodel.name) assert len(model.graph.node) == 1 + @pytest.mark.parametrize("global_upper_bound", [None, "2000"]) + @pytest.mark.parametrize("specified_upper_bound", [None, "cast_out_6:4000"]) + def test_set_upper_bound( + self, + poly_surgeon, + global_upper_bound, + specified_upper_bound, + onnx_model_sanity_check + ): + with util.NamedTemporaryFile() as outmodel: + cmd = ["sanitize", ONNX_MODELS["unbounded_dds"].path, "-o", outmodel.name, "--set-unbounded-dds-upper-bound"] + upper_bound = "1000" + if global_upper_bound: + upper_bound = "2000" + cmd += [global_upper_bound] + if specified_upper_bound: + upper_bound = "4000" + cmd += [specified_upper_bound] + if global_upper_bound is None and specified_upper_bound is None: + cmd += [upper_bound] + poly_surgeon(cmd) + + onnx_model_sanity_check(outmodel.name) + graph = gs.import_onnx(onnx.load(outmodel.name)) + + # Check if there is a Min operator in the modified model + find_min = False + for node in graph.nodes: + if node.op == 'Min': + find_min = True + # Check if the Min operator's second input is a constant tensor + assert isinstance(node.inputs[1], gs.Constant) + + val = node.inputs[1].values + # Check if the constant value equals the target upper bound + assert str(val) == upper_bound + assert (find_min) + + def test_fold_constants_single_pass(self, poly_surgeon, onnx_model_sanity_check): with 
util.NamedTemporaryFile() as outmodel: status = poly_surgeon( diff --git a/tools/Polygraphy/tests/tools/test_template.py b/tools/Polygraphy/tests/tools/test_template.py index 37e2c959..7f6ba23b 100644 --- a/tools/Polygraphy/tests/tools/test_template.py +++ b/tools/Polygraphy/tests/tools/test_template.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/util/test_format.py b/tools/Polygraphy/tests/util/test_format.py index e5efa843..67697e0c 100644 --- a/tools/Polygraphy/tests/util/test_format.py +++ b/tools/Polygraphy/tests/util/test_format.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/Polygraphy/tests/util/test_serde.py b/tools/Polygraphy/tests/util/test_serde.py index db15baff..82ff56b3 100644 --- a/tools/Polygraphy/tests/util/test_serde.py +++ b/tools/Polygraphy/tests/util/test_serde.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,9 @@ import numpy as np import pytest import tensorrt as trt + from polygraphy import constants, util -from polygraphy.backend.trt import Algorithm, TacticReplayData +from polygraphy.backend.trt import Algorithm, TacticReplayData, TensorInfo from polygraphy.comparator import IterationResult, RunResults from polygraphy.exception import PolygraphyException from polygraphy.json import Decoder, Encoder, from_json, load_json, to_json @@ -64,8 +65,11 @@ def make_algo(): implementation=4, tactic=5, # Should work even if strides are not set - inputs=[(trt.TensorFormat.LINEAR, trt.float32, (1, 2)), (trt.TensorFormat.LINEAR, trt.float32)], - outputs=[(trt.TensorFormat.LINEAR, trt.float32, (2, 3))], + inputs=[ + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2), -1, 1), + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2), -1, 1), + ], + outputs=[TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (2, 3), -1, 1)], ) @@ -90,17 +94,21 @@ class TestImplementations: @pytest.mark.parametrize( "obj", [ + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), Algorithm( implementation=4, tactic=5, - inputs=[(trt.TensorFormat.LINEAR, trt.float32)], - outputs=[(trt.TensorFormat.LINEAR, trt.float32)], + inputs=[TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1)], + outputs=[TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1)], ), Algorithm( implementation=4, tactic=5, - inputs=[(trt.TensorFormat.LINEAR, trt.float32), (trt.TensorFormat.CHW32, trt.int8)], - outputs=[(trt.TensorFormat.CHW32, trt.float16)], + inputs=[ + TensorInfo(trt.TensorFormat.LINEAR, trt.float32, (1, 2, 3), -1, 1), + TensorInfo(trt.TensorFormat.CHW32, trt.int8, (1, 2, 3), -1, 1), + ], + outputs=[TensorInfo(trt.TensorFormat.CHW32, trt.float16, (1, 
2, 3), -1, 1)], ), np.ones((3, 4, 5), dtype=np.int64), np.ones(5, dtype=np.int64), diff --git a/tools/Polygraphy/tests/util/test_util.py b/tools/Polygraphy/tests/util/test_util.py index 33a3ea2c..d3e02607 100644 --- a/tools/Polygraphy/tests/util/test_util.py +++ b/tools/Polygraphy/tests/util/test_util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import contextlib +import io import os import random import tempfile @@ -21,6 +23,7 @@ import numpy as np import pytest + from polygraphy import util VOLUME_CASES = [ @@ -238,3 +241,30 @@ def test_nan_inf(self, obj, recursion_depth): expected = f"{{'x': {expected}}}" assert util.make_repr("Example", obj) == (f"Example({expected})", False, True) + + +@pytest.mark.serial +def test_check_called_by(): + outfile = io.StringIO() + with contextlib.redirect_stdout(outfile): + + warn_msg = "Calling 'test_check_called_by.<locals>.callee()' directly is not recommended. Please use 'caller()' instead." + + @util.check_called_by("caller") + def callee(): + pass + + def caller(): + return callee() + + # If we call via the caller, no message should be emitted + caller() + outfile.seek(0) + out = outfile.read() + assert warn_msg not in out + + # If we call the callee directly, we should see a warning + callee() + outfile.seek(0) + out = outfile.read() + assert warn_msg in out From 1030dcbec55c0625f49bae609cd89e14c13e31ca Mon Sep 17 00:00:00 2001 From: Ilya Sherstyuk Date: Thu, 4 May 2023 10:17:11 -0700 Subject: [PATCH 2/4] Update trt-engine-explorer to v0.1.6 Signed-off-by: Ilya Sherstyuk --- .../trt-engine-explorer/CHANGELOG.md | 5 +++ .../trt-engine-explorer/README.md | 2 +- .../experimental/trt-engine-explorer/setup.py | 2 +- .../trt-engine-explorer/trex/__init__.py | 2 +- .../trt-engine-explorer/trex/graphing.py | 41 ++++++++++++++++--- .../trt-engine-explorer/trex/parser.py | 16 ++++++++ 6 files changed, 59 insertions(+), 9 deletions(-) diff --git a/tools/experimental/trt-engine-explorer/CHANGELOG.md b/tools/experimental/trt-engine-explorer/CHANGELOG.md index 861731b2..66c57378 100644 --- a/tools/experimental/trt-engine-explorer/CHANGELOG.md +++ b/tools/experimental/trt-engine-explorer/CHANGELOG.md @@ -2,6 +2,11 @@ Dates are in YYYY-MM-DD format. +## v0.1.6 (2023-April) +- Graph rendering: + - Add node highlighting option. + - Fix bug https://github.com/NVIDIA/TensorRT/issues/2779 + ## v0.1.5 (2022-12-06) - Updated requirements.txt for Ubuntu 20.04 and 22.04 diff --git a/tools/experimental/trt-engine-explorer/README.md b/tools/experimental/trt-engine-explorer/README.md index 0de2c656..ab519e1c 100644 --- a/tools/experimental/trt-engine-explorer/README.md +++ b/tools/experimental/trt-engine-explorer/README.md @@ -43,7 +43,7 @@ $ git clone https://github.com/NVIDIA/TensorRT.git ``` ### 2. Create and activate a Python virtual environment -The commands listed below create and activate a Python virtual enviornment named ```env_trex``` which is stored in a directory by the same name, and configures the current shell to use it as the default python environment.
+The commands listed below create and activate a Python virtual environment named ```env_trex``` which is stored in a directory by the same name, and configures the current shell to use it as the default python environment. ``` $ cd TensorRT/tools/experimental/trt-engine-explorer diff --git a/tools/experimental/trt-engine-explorer/setup.py b/tools/experimental/trt-engine-explorer/setup.py index 91032057..936d82a1 100644 --- a/tools/experimental/trt-engine-explorer/setup.py +++ b/tools/experimental/trt-engine-explorer/setup.py @@ -33,7 +33,7 @@ def main(): setup( name="trex", - version="0.1.5", + version="0.1.6", description="TREX: TensorRT Engine Exploration Toolkit", long_description=open("README.md", "r", encoding="utf-8").read(), author="NVIDIA", diff --git a/tools/experimental/trt-engine-explorer/trex/__init__.py b/tools/experimental/trt-engine-explorer/trex/__init__.py index 8dd001ac..33bf216b 100644 --- a/tools/experimental/trt-engine-explorer/trex/__init__.py +++ b/tools/experimental/trt-engine-explorer/trex/__init__.py @@ -30,4 +30,4 @@ from trex.compare_engines import * from trex.excel_summary import * -__version__ = "0.1.5" +__version__ = "0.1.6" diff --git a/tools/experimental/trt-engine-explorer/trex/graphing.py b/tools/experimental/trt-engine-explorer/trex/graphing.py index 63f511fd..49aa88ed 100644 --- a/tools/experimental/trt-engine-explorer/trex/graphing.py +++ b/tools/experimental/trt-engine-explorer/trex/graphing.py @@ -22,6 +22,7 @@ import os import re +import warnings from enum import Enum from graphviz import Digraph from typing import Callable, NamedTuple, List, Dict @@ -434,6 +435,17 @@ def add_io(tensors: List): return label +def layer_node_highlighter(node_id: str, highlighted_layers_ids: List[int] +) -> Dict: + """Highlight a layer node. + + Create a yellow halo around the node.
+ """ + should_highlight = highlighted_layers_ids and node_id in highlighted_layers_ids + formatting = {'penwidth': str(6), 'color': 'yellow'} + return formatting if should_highlight else {} + + def layer_node_configurable_renderer( layer: Layer, latency: float, @@ -584,10 +596,12 @@ def handle_reformat(layer: Layer): except KeyError: layer_color = "#E5E7E9" - formatting = {'style': 'filled', + formatting = {'shape': 'Mrecord', + 'style': 'filled', 'tooltip': layer.tooltip(), 'fillcolor': layer_color, - 'color': 'white',} + 'color': 'lightgray', + 'fontname': 'Helvetica'} return formatting @@ -623,11 +637,16 @@ def get_latency(plan: EnginePlan, layer: Layer, latency_type) -> float: return latency +def get_dot_id(layer_name: str) -> str: + return layer_name.replace(":", "###") # f"l_{dot_node_id}" + + class DotGraph(object): """This class converts a TensorRT plan into Graphviz DOT graphs""" def __init__(self, plan: EnginePlan, layer_node_formatter: Callable, + layer_node_highlighter: Callable=layer_node_highlighter, layer_node_renderer: Callable=layer_node_configurable_renderer, region_formatter: Callable=region_precision_formatter, display_layer_names: bool=True, @@ -642,11 +661,13 @@ def __init__(self, display_region_names: bool=False, display_edge_name: bool=False, display_edge_details: bool=True, + highlight_layers: list=None, ): plan_graph = PlanGraph( plan, display_regions, display_constants, display_forking_regions) self.dot = Digraph() self.layer_node_formatter = layer_node_formatter + self.layer_node_highlighter = layer_node_highlighter self.layer_node_renderer = layer_node_renderer self.region_formatter = region_formatter self.expand_layer_details = expand_layer_details @@ -659,6 +680,14 @@ def __init__(self, self.display_region_names = display_region_names self.display_edge_name = display_edge_name self.display_edge_details = display_edge_details + # Get the node names of the layers to highlight + self.highlighted_layers_ids = None + if highlight_layers: + try: + highlight_layers_name = plan.df['Name'].iloc[highlight_layers].to_list() + self.highlighted_layers_ids = [get_dot_id(name) for name in highlight_layers_name] + except IndexError: + warnings.warn("The layers indices specified for highlighting are incorrect") node_name_2_node_id = {} self.__add_dot_region_nodes(plan_graph, node_name_2_node_id) @@ -672,7 +701,7 @@ def __init__(self, def __add_dot_region_nodes(self, plan_graph, node_name_2_node_id): dot_node_id = 0 for mem_node in plan_graph.memory_nodes: - node_name_2_node_id[mem_node.name] = dot_id = mem_node.name.replace(":", "###") #f"r_{dot_node_id}" + node_name_2_node_id[mem_node.name] = dot_id = get_dot_id(mem_node.name) self.__create_dot_region_node(dot_id, mem_node.tensor, mem_node.is_user, mem_node.region_gen) dot_node_id += 1 @@ -681,7 +710,7 @@ def __add_dot_layer_nodes(self, plan, plan_graph, node_name_2_node_id): layer = layer_node.layer latency = get_latency(plan, layer, self.latency_type) if not layer.type == 'Constant' or plan_graph.include_constants: - dot_id = layer.name.replace(":", "###") # f"l_{dot_node_id}" + dot_id = get_dot_id(layer.name) node_name_2_node_id[layer.name] = dot_id self.__create_dot_layer_node( dot_id, layer, latency, layer_node_renderer=self.layer_node_renderer) @@ -713,6 +742,7 @@ def __create_dot_layer_node( self, node_id: int, layer: Layer, latency: float, layer_node_renderer: Callable ): formatting = self.layer_node_formatter(layer) + formatting.update(self.layer_node_highlighter(node_id, self.highlighted_layers_ids)) 
self.dot.node( str(node_id), layer_node_renderer( @@ -721,8 +751,7 @@ def __create_dot_layer_node( expand_layer_details=self.expand_layer_details, display_layer_names=self.display_layer_names, stack_layer_names=self.stack_layer_names), - shape='Mrecord', - fontname="Helvetica", **formatting) + **formatting) def __create_dot_edge(self, src, end, tensor, region_gen): def generation_color(gen: int, line_color: str) -> str: diff --git a/tools/experimental/trt-engine-explorer/trex/parser.py b/tools/experimental/trt-engine-explorer/trex/parser.py index cc8654d0..057064c5 100644 --- a/tools/experimental/trt-engine-explorer/trex/parser.py +++ b/tools/experimental/trt-engine-explorer/trex/parser.py @@ -172,7 +172,23 @@ def convert_deconv(raw_layers: List) -> List: pass return raw_layers + def fix_metadata(raw_layers: List) -> List: + """TensorRT 8.6 introduced the Metadata field, with a non-ASCII character + that triggers an SVG rendering error. This function replaces this character. + + See: https://github.com/NVIDIA/TensorRT/issues/2779 + """ + TRT_METADATA_DELIM = '\x1E' + for l in raw_layers: + try: + if TRT_METADATA_DELIM in l['Metadata']: + l['Metadata'] = l['Metadata'].replace(TRT_METADATA_DELIM, '+') + except KeyError: + pass + return raw_layers + raw_layers, bindings = read_graph_file(graph_file) + raw_layers = fix_metadata(raw_layers) raw_layers = convert_deconv(raw_layers) raw_layers = disambiguate_layer_names(raw_layers) raw_layers, bindings = filter_profiles(raw_layers, bindings, profile_id) From b83cbbdf2860cfe0255ad308d1091fc4b287412e Mon Sep 17 00:00:00 2001 From: Ilya Sherstyuk Date: Thu, 4 May 2023 10:50:32 -0700 Subject: [PATCH 3/4] Update parsers/onnx and protobuf submodules for 8.6.1 Signed-off-by: Ilya Sherstyuk --- parsers/onnx | 2 +- third_party/protobuf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/parsers/onnx b/parsers/onnx index 6872a947..6ba67d34 160000 --- a/parsers/onnx +++ b/parsers/onnx @@ -1 +1 @@ -Subproject commit 6872a9473391a73b96741711d52b98c2c3e25146 +Subproject commit 6ba67d3428e05f690145373ca87fb8d32f98df45 diff --git a/third_party/protobuf b/third_party/protobuf index fb6f8da0..aea4a275 160000 --- a/third_party/protobuf +++ b/third_party/protobuf @@ -1 +1 @@ -Subproject commit fb6f8da08b60b6beb5bb360d79dd3feda0147da7 +Subproject commit aea4a275e28329f648e046469c095eef74254bb2 From e31452828128b7611bdb985e2a5345a08788d400 Mon Sep 17 00:00:00 2001 From: Ilya Sherstyuk Date: Fri, 17 Mar 2023 00:19:19 +0000 Subject: [PATCH 4/4] Update TensorRT to 8.6.1 Signed-off-by: Ilya Sherstyuk --- .github/ISSUE_TEMPLATE/bug_report.md | 55 +- CHANGELOG.md | 47 +- CMakeLists.txt | 6 +- README.md | 26 +- VERSION | 2 +- .../toolchains/cmake_aarch64-native.toolchain | 2 +- demo/BERT/CMakeLists.txt | 2 +- demo/BERT/README.md | 3 +- demo/BERT/builder.py | 2 +- demo/BERT/builder_utils.py | 14 +- demo/BERT/builder_varseqlen.py | 14 +- demo/BERT/helpers/calibrator.py | 2 +- demo/BERT/helpers/data_processing.py | 2 +- demo/BERT/helpers/tokenization.py | 2 +- demo/BERT/infer_c/bert_infer.h | 2 +- demo/BERT/infer_c/common.h | 2 +- demo/BERT/infer_c/infer_c.cpp | 2 +- demo/BERT/infer_c/logging.cpp | 2 +- demo/BERT/infer_c/logging.h | 2 +- demo/BERT/infer_c/perf.cpp | 2 +- demo/BERT/inference.py | 2 +- demo/BERT/inference_c.py | 2 +- demo/BERT/inference_varseqlen.py | 2 +- demo/BERT/perf.py | 2 +- demo/BERT/perf_varseqlen.py | 2 +- demo/BERT/squad/evaluate-v1.1.py | 2 +- demo/BERT/squad/evaluate-v2.0.py | 8 +- demo/DeBERTa/deberta_onnx_modify.py | 32 +- 
demo/DeBERTa/deberta_ort_inference.py | 44 +- demo/DeBERTa/deberta_pytorch2onnx.py | 30 +- demo/DeBERTa/deberta_tensorrt_inference.py | 66 +-- demo/DeBERTa/requirements.txt | 2 +- demo/Diffusion/README.md | 7 +- demo/Diffusion/demo_img2img.py | 7 + demo/Diffusion/demo_inpaint.py | 10 +- demo/Diffusion/demo_txt2img.py | 10 +- demo/Diffusion/inpaint_pipeline.py | 3 +- demo/Diffusion/models.py | 2 +- demo/Diffusion/stable_diffusion_pipeline.py | 43 +- demo/Diffusion/utilities.py | 58 ++- .../notebooks/EfficientDet-TensorRT8.ipynb | 39 +- demo/HuggingFace/BART/BARTModelConfig.py | 52 +- demo/HuggingFace/BART/checkpoint.toml | 3 - demo/HuggingFace/BART/export.py | 17 +- demo/HuggingFace/BART/frameworks.py | 16 +- demo/HuggingFace/BART/hf.py | 6 +- demo/HuggingFace/BART/measurements.py | 12 +- demo/HuggingFace/BART/onnxrt.py | 6 +- demo/HuggingFace/BART/trt.py | 30 +- demo/HuggingFace/GPT2/GPT2ModelConfig.py | 4 +- demo/HuggingFace/GPT2/export.py | 15 +- demo/HuggingFace/GPT2/frameworks.py | 14 +- demo/HuggingFace/GPT2/measurements.py | 12 +- demo/HuggingFace/GPT2/trt.py | 90 ++-- demo/HuggingFace/NNDF/checkpoints.py | 2 +- demo/HuggingFace/NNDF/cuda_bootstrapper.py | 101 ++++ demo/HuggingFace/NNDF/general_utils.py | 24 +- demo/HuggingFace/NNDF/interface.py | 18 +- demo/HuggingFace/NNDF/logger.py | 2 +- demo/HuggingFace/NNDF/models.py | 4 +- demo/HuggingFace/NNDF/networks.py | 2 +- demo/HuggingFace/NNDF/tensorrt_utils.py | 58 ++- demo/HuggingFace/NNDF/torch_utils.py | 2 +- demo/HuggingFace/README.md | 18 +- demo/HuggingFace/T5/T5ModelConfig.py | 16 +- demo/HuggingFace/T5/export.py | 58 ++- demo/HuggingFace/T5/frameworks.py | 14 +- demo/HuggingFace/T5/measurements.py | 6 +- demo/HuggingFace/T5/onnxrt.py | 4 +- demo/HuggingFace/T5/trt.py | 107 ++-- .../notebooks/bart-playground.ipynb | 2 +- demo/HuggingFace/notebooks/bart.ipynb | 4 +- demo/HuggingFace/notebooks/gpt2.ipynb | 7 +- demo/HuggingFace/notebooks/t5.ipynb | 26 +- demo/HuggingFace/requirements.txt | 5 +- demo/HuggingFace/run.py | 9 +- demo/HuggingFace/tests/test_interface.py | 2 +- demo/Tacotron2/common/audio_processing.py | 2 +- demo/Tacotron2/common/layers.py | 2 +- demo/Tacotron2/common/stft.py | 2 +- demo/Tacotron2/common/utils.py | 2 +- demo/Tacotron2/data_functions.py | 2 +- demo/Tacotron2/inference.py | 2 +- demo/Tacotron2/inference_perf.py | 2 +- demo/Tacotron2/loss_functions.py | 2 +- demo/Tacotron2/main.py | 2 +- demo/Tacotron2/models.py | 2 +- demo/Tacotron2/multiproc.py | 2 +- demo/Tacotron2/preprocess_audio2mel.py | 2 +- demo/Tacotron2/tacotron2/arg_parser.py | 2 +- demo/Tacotron2/tacotron2/data_function.py | 2 +- demo/Tacotron2/tacotron2/loss_function.py | 2 +- demo/Tacotron2/tacotron2/model.py | 2 +- demo/Tacotron2/tacotron2/text/cleaners.py | 2 +- demo/Tacotron2/tacotron2/text/cmudict.py | 2 +- demo/Tacotron2/tacotron2/text/numbers.py | 2 +- demo/Tacotron2/tacotron2/text/symbols.py | 2 +- demo/Tacotron2/tensorrt/convert_onnx2trt.py | 3 +- .../tensorrt/convert_tacotron22onnx.py | 2 +- .../tensorrt/convert_waveglow2onnx.py | 2 +- demo/Tacotron2/tensorrt/generate_decoder.py | 4 +- demo/Tacotron2/tensorrt/inference_trt.py | 8 +- demo/Tacotron2/tensorrt/test_infer_trt.py | 4 +- demo/Tacotron2/tensorrt/trt_utils.py | 4 +- demo/Tacotron2/test_infer.py | 2 +- demo/Tacotron2/train.py | 2 +- demo/Tacotron2/waveglow/arg_parser.py | 2 +- demo/Tacotron2/waveglow/data_function.py | 2 +- demo/Tacotron2/waveglow/denoiser.py | 2 +- demo/Tacotron2/waveglow/loss_function.py | 2 +- demo/Tacotron2/waveglow/model.py | 11 +- docker/build.sh | 11 
+- docker/centos-7.Dockerfile | 18 +- docker/launch.sh | 2 +- docker/ubuntu-18.04.Dockerfile | 15 +- docker/ubuntu-20.04-aarch64.Dockerfile | 12 +- docker/ubuntu-20.04.Dockerfile | 15 +- docker/ubuntu-cross-aarch64.Dockerfile | 18 +- include/NvInfer.h | 4 +- include/NvInferImpl.h | 6 + include/NvInferRuntime.h | 115 ++++- include/NvInferRuntimeCommon.h | 74 +-- include/NvInferRuntimePlugin.h | 2 +- include/NvInferVersion.h | 11 +- parsers/CMakeLists.txt | 2 +- parsers/caffe/CMakeLists.txt | 2 +- parsers/caffe/NvCaffeParser.cpp | 2 +- parsers/caffe/binaryProtoBlob.h | 2 +- parsers/caffe/blobNameToTensor.h | 2 +- parsers/caffe/caffeMacros.h | 2 +- parsers/caffe/caffeParser/caffeParser.cpp | 2 +- parsers/caffe/caffeParser/caffeParser.h | 2 +- .../caffe/caffeParser/opParsers/opParsers.h | 2 +- .../caffeParser/opParsers/parseAbsVal.cpp | 2 +- .../caffe/caffeParser/opParsers/parseBNLL.cpp | 2 +- .../caffeParser/opParsers/parseBatchNorm.cpp | 2 +- .../caffe/caffeParser/opParsers/parseClip.cpp | 2 +- .../caffeParser/opParsers/parseConcat.cpp | 2 +- .../caffe/caffeParser/opParsers/parseConv.cpp | 2 +- .../caffe/caffeParser/opParsers/parseCrop.cpp | 2 +- .../caffeParser/opParsers/parseDeconv.cpp | 2 +- .../caffe/caffeParser/opParsers/parseELU.cpp | 2 +- .../caffeParser/opParsers/parseEltwise.cpp | 2 +- .../opParsers/parseInnerProduct.cpp | 2 +- .../caffe/caffeParser/opParsers/parseLRN.cpp | 2 +- .../caffeParser/opParsers/parsePReLU.cpp | 2 +- .../caffeParser/opParsers/parsePermute.cpp | 2 +- .../caffeParser/opParsers/parsePooling.cpp | 2 +- .../caffeParser/opParsers/parsePower.cpp | 2 +- .../caffe/caffeParser/opParsers/parseReLU.cpp | 2 +- .../caffeParser/opParsers/parseReduction.cpp | 2 +- .../caffeParser/opParsers/parseReshape.cpp | 2 +- .../caffeParser/opParsers/parseScale.cpp | 2 +- .../caffeParser/opParsers/parseSigmoid.cpp | 2 +- .../caffeParser/opParsers/parseSoftMax.cpp | 2 +- .../caffe/caffeParser/opParsers/parseTanH.cpp | 2 +- parsers/caffe/caffeParser/readProto.h | 2 +- .../caffeWeightFactory/caffeWeightFactory.cpp | 2 +- .../caffeWeightFactory/caffeWeightFactory.h | 2 +- parsers/caffe/caffeWeightFactory/weightType.h | 2 +- parsers/common/half.h | 2 +- parsers/common/ieee_half.h | 2 +- parsers/common/parserUtils.h | 2 +- plugin/CMakeLists.txt | 2 +- plugin/api/inferPlugin.cpp | 17 +- plugin/batchTilePlugin/batchTilePlugin.h | 2 +- plugin/batchedNMSPlugin/gatherNMSOutputs.h | 4 +- ...KVToContextPluginDynamic_PluginConfig.yaml | 83 +++- .../include/fused_multihead_attention.h | 8 +- ...head_attention_fp16_128_64_kernel.sm75.cpp | 2 +- ...head_attention_fp16_128_64_kernel.sm80.cpp | 2 +- ...head_attention_fp16_128_64_kernel.sm87.cpp | 2 +- ...head_attention_fp16_128_64_kernel.sm90.cpp | 2 +- ...head_attention_fp16_384_64_kernel.sm75.cpp | 2 +- ...head_attention_fp16_384_64_kernel.sm80.cpp | 2 +- ...head_attention_fp16_384_64_kernel.sm86.cpp | 2 +- ...head_attention_fp16_384_64_kernel.sm87.cpp | 2 +- ...head_attention_fp16_384_64_kernel.sm90.cpp | 2 +- ...head_attention_fp16_512_64_kernel.sm90.cpp | 2 +- ...ihead_attention_fp16_64_64_kernel.sm75.cpp | 2 +- ...ihead_attention_fp16_64_64_kernel.sm80.cpp | 2 +- ...ihead_attention_fp16_64_64_kernel.sm87.cpp | 2 +- ...ihead_attention_fp16_64_64_kernel.sm90.cpp | 2 +- ...ihead_attention_fp16_96_64_kernel.sm75.cpp | 2 +- ...ihead_attention_fp16_96_64_kernel.sm80.cpp | 2 +- ...ihead_attention_fp16_96_64_kernel.sm87.cpp | 2 +- ...ihead_attention_fp16_96_64_kernel.sm90.cpp | 2 +- ...head_attention_int8_128_64_kernel.sm75.cpp | 2 +- 
...head_attention_int8_128_64_kernel.sm80.cpp | 2 +- ...head_attention_int8_128_64_kernel.sm87.cpp | 2 +- ...head_attention_int8_128_64_kernel.sm90.cpp | 2 +- ...head_attention_int8_384_64_kernel.sm75.cpp | 2 +- ...head_attention_int8_384_64_kernel.sm80.cpp | 2 +- ...head_attention_int8_384_64_kernel.sm87.cpp | 2 +- ...head_attention_int8_384_64_kernel.sm90.cpp | 2 +- ...head_attention_int8_512_64_kernel.sm90.cpp | 2 +- ...ihead_attention_int8_64_64_kernel.sm80.cpp | 2 +- ...ihead_attention_int8_96_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_128_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_128_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_128_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_128_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_128_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_fp16_128_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_fp16_128_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_fp16_256_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_256_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_256_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_256_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_256_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_fp16_256_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_fp16_256_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_fp16_384_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_384_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_384_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_fp16_384_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_fp16_384_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_fp16_512_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_512_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_512_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_fp16_512_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_fp16_512_64_kernel.sm90.cpp | 2 +- ...ad_attention_v2_fp16_64_64_kernel.sm75.cpp | 2 +- ...ad_attention_v2_fp16_64_64_kernel.sm80.cpp | 2 +- ...ad_attention_v2_fp16_64_64_kernel.sm86.cpp | 2 +- ...ad_attention_v2_fp16_64_64_kernel.sm87.cpp | 2 +- ...ad_attention_v2_fp16_64_64_kernel.sm90.cpp | 2 +- ...ad_attention_v2_fp16_96_64_kernel.sm75.cpp | 2 +- ...ad_attention_v2_fp16_96_64_kernel.sm80.cpp | 2 +- ...ad_attention_v2_fp16_96_64_kernel.sm86.cpp | 2 +- ...ad_attention_v2_fp16_96_64_kernel.sm87.cpp | 2 +- ...ad_attention_v2_fp16_96_64_kernel.sm90.cpp | 2 +- ...ttention_v2_il_int8_128_32_kernel.sm80.cpp | 2 +- ...ttention_v2_il_int8_128_64_kernel.sm87.cpp | 2 +- ...ttention_v2_il_int8_128_64_kernel.sm90.cpp | 2 +- ...ttention_v2_il_int8_192_64_kernel.sm87.cpp | 2 +- ...ttention_v2_il_int8_192_64_kernel.sm90.cpp | 2 +- ...ttention_v2_il_int8_256_64_kernel.sm87.cpp | 2 +- ...ttention_v2_il_int8_256_64_kernel.sm90.cpp | 2 +- ...ttention_v2_il_int8_384_64_kernel.sm87.cpp | 2 +- ...ttention_v2_il_int8_384_64_kernel.sm90.cpp | 2 +- ...attention_v2_il_int8_64_64_kernel.sm80.cpp | 2 +- ...attention_v2_il_int8_64_64_kernel.sm87.cpp | 2 +- ...attention_v2_il_int8_64_64_kernel.sm90.cpp | 2 +- ...attention_v2_il_int8_96_64_kernel.sm80.cpp | 2 +- ...attention_v2_il_int8_96_64_kernel.sm87.cpp | 2 +- ...attention_v2_il_int8_96_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_int8_128_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_128_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm72.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_int8_128_64_kernel.sm90.cpp | 2 +- 
...d_attention_v2_int8_192_64_kernel.sm72.cpp | 2 +- ...d_attention_v2_int8_192_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_192_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_192_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_int8_192_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_int8_192_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_int8_256_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_256_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm72.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_int8_256_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm72.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm86.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm87.cpp | 2 +- ...d_attention_v2_int8_384_64_kernel.sm90.cpp | 2 +- ...d_attention_v2_int8_512_32_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_512_32_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_512_64_kernel.sm75.cpp | 2 +- ...d_attention_v2_int8_512_64_kernel.sm80.cpp | 2 +- ...d_attention_v2_int8_512_64_kernel.sm90.cpp | 2 +- ...ad_attention_v2_int8_64_64_kernel.sm80.cpp | 2 +- ...ad_attention_v2_int8_64_64_kernel.sm87.cpp | 2 +- ...ad_attention_v2_int8_64_64_kernel.sm90.cpp | 2 +- ...ad_attention_v2_int8_96_64_kernel.sm80.cpp | 2 +- ...ad_attention_v2_int8_96_64_kernel.sm87.cpp | 2 +- ...ad_attention_v2_int8_96_64_kernel.sm90.cpp | 2 +- plugin/bertQKVToContextPlugin/qkvToContext.cu | 3 +- .../qkvToContextInt8InterleavedPlugin.cpp | 32 +- .../qkvToContextInt8InterleavedPlugin.h | 34 +- .../qkvToContextPlugin.cpp | 4 +- .../qkvToContextPlugin.h | 5 +- plugin/clipPlugin/clip.h | 4 +- plugin/clipPlugin/clipPlugin.cpp | 18 +- plugin/clipPlugin/clipPlugin.h | 14 +- plugin/common/bboxUtils.h | 8 +- plugin/common/bertCommon.h | 56 ++- plugin/common/checkMacrosPlugin.cpp | 16 +- plugin/common/checkMacrosPlugin.h | 34 +- plugin/common/common.cuh | 44 +- plugin/common/cub_helper.h | 4 +- plugin/common/cudaDriverWrapper.cpp | 2 +- plugin/common/cudaDriverWrapper.h | 16 +- plugin/common/kernels/decodeBbox3DKernels.cu | 9 +- plugin/common/kernels/kernel.h | 12 +- plugin/common/kernels/maskRCNNKernels.h | 66 +-- plugin/common/kernels/reducedMathPlugin.h | 24 +- plugin/common/mrcnn_config.h | 18 +- plugin/common/nmsHelper.cpp | 12 +- plugin/common/nmsUtils.h | 4 +- plugin/common/plugin.h | 1 + plugin/common/reducedMathPlugin.cpp | 12 +- plugin/common/templates.h | 2 +- plugin/common/vfcCommon.cpp | 2 +- plugin/common/vfcCommon.h | 2 +- .../coordConvACPlugin/coordConvACPlugin.cpp | 25 +- plugin/coordConvACPlugin/coordConvACPlugin.h | 22 +- plugin/decodeBbox3DPlugin/decodeBbox3D.cpp | 470 ++++++++++-------- plugin/decodeBbox3DPlugin/decodeBbox3D.h | 64 ++- .../detectionLayerPlugin.cpp | 44 +- .../detectionLayerPlugin.h | 2 +- .../disentangledAttentionPlugin.cpp | 299 +++++------ .../disentangledAttentionPlugin.h | 36 +- .../disentangledKernel.cu | 17 +- .../efficientNMSInference.h | 3 +- .../efficientNMSParameters.h | 22 +- .../efficientNMSPlugin/efficientNMSPlugin.cpp | 36 +- .../efficientNMSPlugin/efficientNMSPlugin.h | 22 +- .../tftrt/efficientNMSExplicitTFTRTPlugin.cpp | 10 +- .../tftrt/efficientNMSImplicitTFTRTPlugin.cpp | 37 +- .../tftrt/efficientNMSImplicitTFTRTPlugin.h | 25 +- 
...mbLayerNormPluginDynamic_PluginConfig.yaml | 51 +- .../embLayerNormPlugin/embLayerNormPlugin.cpp | 8 +- .../embLayerNormPlugin/embLayerNormPlugin.h | 2 +- plugin/exports-vfc_plugin.map | 2 +- plugin/exports.map | 2 +- .../CustomFCPluginDynamic_PluginConfig.yaml | 19 +- plugin/flattenConcat/flattenConcat.cpp | 63 +-- plugin/flattenConcat/flattenConcat.h | 33 +- .../CustomGeluPluginDynamic_PluginConfig.yaml | 20 + plugin/geluPlugin/geluPlugin.cpp | 180 ++++--- plugin/geluPlugin/geluPlugin.h | 36 +- .../generateDetectionPlugin.cpp | 36 +- .../generateDetectionPlugin.h | 25 +- plugin/gridAnchorPlugin/gridAnchorPlugin.cpp | 121 ++--- plugin/gridAnchorPlugin/gridAnchorPlugin.h | 27 +- ...GroupNormalizationPlugin_PluginConfig.yaml | 74 ++- ...roupNormalizationPlugin_PluginReference.py | 27 + .../groupNormalizationPlugin.cpp | 329 +++++++----- .../groupNormalizationPlugin.h | 43 +- .../instanceNormalizationPlugin.h | 2 +- plugin/leakyReluPlugin/lReluPlugin.cpp | 22 +- plugin/leakyReluPlugin/lReluPlugin.h | 16 +- .../modulatedDeformConvPlugin.h | 2 +- .../multilevelCropAndResizePlugin.cpp | 33 +- .../multilevelCropAndResizePlugin.h | 25 +- .../multilevelProposeROIPlugin.cpp | 83 ++-- .../multilevelProposeROIPlugin.h | 43 +- .../multilevelProposeROI/tlt_mrcnn_config.h | 22 +- .../multiscaleDeformableAttn.cu | 8 +- .../multiscaleDeformableAttnPlugin.cpp | 12 +- .../multiscaleDeformableIm2ColCuda.cuh | 66 +-- plugin/nmsPlugin/nmsPlugin.cpp | 137 ++--- plugin/nmsPlugin/nmsPlugin.h | 48 +- plugin/normalizePlugin/normalizePlugin.cpp | 54 +- plugin/normalizePlugin/normalizePlugin.h | 37 +- plugin/nvFasterRCNN/nvFasterRCNNPlugin.cpp | 45 +- plugin/nvFasterRCNN/nvFasterRCNNPlugin.h | 21 +- plugin/pillarScatterPlugin/pillarScatter.cpp | 38 +- plugin/pillarScatterPlugin/pillarScatter.h | 20 +- .../proposalLayerPlugin.cpp | 53 +- .../proposalLayerPlugin/proposalLayerPlugin.h | 33 +- .../pyramidROIAlignPlugin.cpp | 32 +- .../pyramidROIAlignPlugin.h | 2 +- plugin/regionPlugin/regionPlugin.cpp | 97 ++-- plugin/regionPlugin/regionPlugin.h | 31 +- plugin/reorgPlugin/reorgPlugin.cpp | 37 +- plugin/reorgPlugin/reorgPlugin.h | 31 +- .../resizeNearestPlugin.cpp | 39 +- .../resizeNearestPlugin/resizeNearestPlugin.h | 21 +- plugin/scatterPlugin/scatterPlugin.cpp | 15 +- plugin/scatterPlugin/scatterPlugin.h | 15 +- ...ipLayerNormPluginDynamic_PluginConfig.yaml | 34 ++ .../skipLayerNormPlugin.cpp | 4 +- .../specialSlicePlugin/specialSlicePlugin.cpp | 29 +- .../specialSlicePlugin/specialSlicePlugin.h | 25 +- plugin/splitPlugin/split.cu | 67 ++- plugin/splitPlugin/split.h | 36 +- plugin/splitPlugin/splitPlugin.cpp | 89 ---- plugin/splitPlugin/splitPlugin.h | 187 ------- python/CMakeLists.txt | 2 +- python/docstrings/infer/pyCoreDoc.h | 3 +- python/docstrings/infer/pyGraphDoc.h | 8 +- python/docstrings/parsers/pyCaffeDoc.h | 2 +- python/docstrings/parsers/pyOnnxDoc.h | 8 +- .../{ => bindings_wheel}/LICENSE.txt | 0 .../packaging/{ => bindings_wheel}/setup.cfg | 0 python/packaging/bindings_wheel/setup.py | 50 ++ .../{ => bindings_wheel}/tensorrt/__init__.py | 44 +- python/packaging/frontend_sdist/LICENSE.txt | 180 +++++++ python/packaging/frontend_sdist/setup.cfg | 5 + python/packaging/frontend_sdist/setup.py | 70 +++ .../frontend_sdist/tensorrt/__init__.py | 18 + python/packaging/libs_wheel/LICENSE.txt | 180 +++++++ python/packaging/libs_wheel/setup.cfg | 5 + python/packaging/{ => libs_wheel}/setup.py | 45 +- .../libs_wheel/tensorrt_libs/__init__.py | 33 ++ python/requirements.txt | 2 +- python/src/infer/pyCore.cpp | 80 ++- 
python/src/infer/pyGraph.cpp | 130 ++--- python/src/parsers/pyOnnx.cpp | 6 +- .../3. Using Tensorflow 2 through ONNX.ipynb | 13 +- .../Additional Examples/helper.py | 2 +- quickstart/IntroNotebooks/helper.py | 2 +- quickstart/IntroNotebooks/onnx_helper.py | 2 +- quickstart/Makefile | 2 +- quickstart/Makefile.config | 2 +- quickstart/SemanticSegmentation/Makefile | 2 +- quickstart/SemanticSegmentation/export.py | 2 +- .../SemanticSegmentation/tutorial-runtime.cpp | 2 +- quickstart/common/logger.cpp | 2 +- quickstart/common/logger.h | 2 +- quickstart/common/logging.h | 2 +- quickstart/common/util.cpp | 2 +- quickstart/common/util.h | 2 +- quickstart/deploy_to_triton/config.pbtxt | 66 +-- .../deploy_to_triton/export_resnet_to_onnx.py | 61 ++- quickstart/deploy_to_triton/triton_client.py | 91 ++-- requirements.txt | 3 +- samples/CMakeLists.txt | 3 +- samples/CMakeSamplesTemplate.txt | 2 +- samples/common/BatchStream.h | 6 +- samples/common/EntropyCalibrator.h | 2 +- samples/common/ErrorRecorder.h | 2 +- samples/common/argsParser.h | 2 +- samples/common/buffers.h | 2 +- samples/common/common.h | 24 +- samples/common/dumpTFWts.py | 2 +- samples/common/getOptions.cpp | 2 +- samples/common/getOptions.h | 2 +- samples/common/getoptWin.h | 2 +- samples/common/half.h | 6 +- samples/common/logger.cpp | 2 +- samples/common/logger.h | 2 +- samples/common/logging.h | 2 +- samples/common/parserOnnxConfig.h | 2 +- samples/common/safeCommon.h | 6 +- samples/common/sampleConfig.h | 10 +- samples/common/sampleDevice.h | 2 +- samples/common/sampleEngines.cpp | 49 +- samples/common/sampleEngines.h | 2 +- samples/common/sampleInference.cpp | 152 ++++-- samples/common/sampleInference.h | 2 +- samples/common/sampleOptions.cpp | 66 ++- samples/common/sampleOptions.h | 7 +- samples/common/sampleReporting.cpp | 28 +- samples/common/sampleReporting.h | 2 +- samples/common/sampleUtils.cpp | 7 +- samples/common/sampleUtils.h | 2 +- samples/python/common.py | 2 +- samples/python/detectron2/build_engine.py | 2 +- samples/python/detectron2/create_onnx.py | 2 +- samples/python/detectron2/eval_coco.py | 18 +- samples/python/detectron2/image_batcher.py | 8 +- samples/python/detectron2/infer.py | 2 +- samples/python/detectron2/onnx_utils.py | 12 +- samples/python/detectron2/requirements.txt | 6 +- samples/python/detectron2/visualize.py | 4 +- samples/python/downloader.py | 2 +- samples/python/efficientdet/build_engine.py | 2 +- samples/python/efficientdet/compare_tf.py | 2 +- samples/python/efficientdet/create_onnx.py | 2 +- samples/python/efficientdet/eval_coco.py | 2 +- samples/python/efficientdet/image_batcher.py | 2 +- samples/python/efficientdet/infer.py | 2 +- samples/python/efficientdet/infer_tf.py | 2 +- samples/python/efficientdet/onnx_utils.py | 2 +- samples/python/efficientdet/visualize.py | 2 +- samples/python/efficientnet/build_engine.py | 2 +- samples/python/efficientnet/compare_tf.py | 2 +- samples/python/efficientnet/create_onnx.py | 2 +- samples/python/efficientnet/eval_gt.py | 2 +- samples/python/efficientnet/image_batcher.py | 2 +- samples/python/efficientnet/infer.py | 2 +- .../build_and_refit_engine.py | 2 +- .../data_processing.py | 2 +- .../engine_refit_onnx_bidaf/prepare_model.py | 2 +- .../onnx_resnet50.py | 2 +- .../python/network_api_pytorch_mnist/model.py | 2 +- .../network_api_pytorch_mnist/sample.py | 2 +- .../python/onnx_custom_plugin/CMakeLists.txt | 17 + .../onnx_custom_plugin/load_plugin_lib.py | 2 +- samples/python/onnx_custom_plugin/model.py | 4 +- .../plugin/customHardmaxPlugin.cpp | 2 +- 
.../plugin/customHardmaxPlugin.h | 2 +- samples/python/onnx_custom_plugin/sample.py | 2 +- .../test_custom_hardmax_plugin.py | 2 +- .../python/onnx_packnet/convert_to_onnx.py | 2 +- .../python/onnx_packnet/post_processing.py | 2 +- samples/python/scripts/download_mnist_data.sh | 2 +- samples/python/scripts/download_mnist_pgms.py | 3 +- .../build_engine.py | 2 +- .../compare_tf.py | 12 +- .../create_onnx.py | 95 ++-- .../eval_coco.py | 12 +- .../image_batcher.py | 2 +- .../tensorflow_object_detection_api/infer.py | 2 +- .../onnx_utils.py | 6 +- .../visualize.py | 4 +- samples/python/yolov3_onnx/data_processing.py | 2 +- .../python/yolov3_onnx/onnx_to_tensorrt.py | 2 +- samples/python/yolov3_onnx/yolov3_to_onnx.py | 2 +- .../sampleAlgorithmSelector/CMakeLists.txt | 2 +- .../sampleAlgorithmSelector.cpp | 2 +- samples/sampleCharRNN/CMakeLists.txt | 2 +- samples/sampleCharRNN/sampleCharRNN.cpp | 10 +- samples/sampleDynamicReshape/CMakeLists.txt | 2 +- .../sampleDynamicReshape.cpp | 2 +- samples/sampleINT8API/CMakeLists.txt | 2 +- samples/sampleINT8API/sampleINT8API.cpp | 2 +- samples/sampleIOFormats/CMakeLists.txt | 2 +- samples/sampleIOFormats/sampleIOFormats.cpp | 2 +- samples/sampleNamedDimensions/CMakeLists.txt | 2 +- samples/sampleNamedDimensions/create_model.py | 4 +- .../sampleNamedDimensions.cpp | 2 +- samples/sampleOnnxMNIST/CMakeLists.txt | 2 +- samples/sampleOnnxMNIST/README.md | 5 +- samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp | 2 +- .../sampleOnnxMnistCoordConvAC/CMakeLists.txt | 2 +- .../sampleOnnxMnistCoordConvAC/coord_conv.py | 2 +- .../mnist_coord_conv_train.py | 12 +- .../modify_onnx_ac.py | 4 +- .../sampleOnnxMnistCoordConvAC.cpp | 2 +- samples/trtexec/CMakeLists.txt | 2 +- samples/trtexec/prn_utils.py | 2 +- samples/trtexec/profiler.py | 2 +- samples/trtexec/tracer.py | 2 +- samples/trtexec/trtexec.cpp | 8 +- scripts/copyright-scan.py | 2 +- scripts/stubify.sh | 19 +- third_party/ieee/half.h | 2 +- tools/onnx-graphsurgeon/Makefile | 2 +- tools/onnx-graphsurgeon/docs/conf.py | 2 +- .../examples/01_creating_a_model/example.py | 2 +- .../example.py | 2 +- .../03_isolating_a_subgraph/generate.py | 2 +- .../03_isolating_a_subgraph/isolate.py | 2 +- .../examples/04_modifying_a_model/generate.py | 2 +- .../examples/04_modifying_a_model/modify.py | 2 +- .../examples/05_folding_constants/README.md | 2 +- .../examples/05_folding_constants/fold.py | 2 +- .../examples/05_folding_constants/generate.py | 2 +- .../examples/06_removing_nodes/generate.py | 2 +- .../examples/06_removing_nodes/remove.py | 2 +- .../generate.py | 2 +- .../08_replacing_a_subgraph/generate.py | 2 +- .../08_replacing_a_subgraph/replace.py | 2 +- .../generate.py | 2 +- .../10_dynamic_batch_size/generate.py | 2 +- .../examples/10_dynamic_batch_size/modify.py | 2 +- .../exporters/base_exporter.py | 2 +- .../exporters/onnx_exporter.py | 2 +- .../importers/base_importer.py | 2 +- .../importers/onnx_importer.py | 2 +- .../onnx_graphsurgeon/ir/graph.py | 2 +- .../onnx_graphsurgeon/ir/node.py | 2 +- .../onnx_graphsurgeon/ir/tensor.py | 2 +- .../onnx_graphsurgeon/logger/logger.py | 2 +- .../onnx_graphsurgeon/util/exception.py | 2 +- .../onnx_graphsurgeon/util/misc.py | 2 +- tools/onnx-graphsurgeon/setup.py | 2 +- .../onnx-graphsurgeon/tests/ir/test_graph.py | 2 +- tools/onnx-graphsurgeon/tests/onnx_models.py | 2 +- tools/onnx-graphsurgeon/tests/test_api.py | 2 +- .../onnx-graphsurgeon/tests/test_examples.py | 2 +- .../onnx-graphsurgeon/tests/test_exporters.py | 2 +- .../onnx-graphsurgeon/tests/test_importers.py | 2 +- 
tools/onnx-graphsurgeon/tests/test_ir.py | 2 +- tools/onnx-graphsurgeon/tests/test_util.py | 2 +- 578 files changed, 4943 insertions(+), 3505 deletions(-) mode change 100644 => 100755 demo/BERT/README.md create mode 100644 demo/HuggingFace/NNDF/cuda_bootstrapper.py create mode 100644 plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginReference.py delete mode 100644 plugin/splitPlugin/splitPlugin.cpp delete mode 100644 plugin/splitPlugin/splitPlugin.h rename python/packaging/{ => bindings_wheel}/LICENSE.txt (100%) rename python/packaging/{ => bindings_wheel}/setup.cfg (100%) create mode 100644 python/packaging/bindings_wheel/setup.py rename python/packaging/{ => bindings_wheel}/tensorrt/__init__.py (86%) create mode 100644 python/packaging/frontend_sdist/LICENSE.txt create mode 100644 python/packaging/frontend_sdist/setup.cfg create mode 100644 python/packaging/frontend_sdist/setup.py create mode 100644 python/packaging/frontend_sdist/tensorrt/__init__.py create mode 100644 python/packaging/libs_wheel/LICENSE.txt create mode 100644 python/packaging/libs_wheel/setup.cfg rename python/packaging/{ => libs_wheel}/setup.py (59%) create mode 100644 python/packaging/libs_wheel/tensorrt_libs/__init__.py diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 06990f58..89573c94 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,7 +1,7 @@ --- -name: TensorRT OSS Bug Report -about: Report any bugs to help us improve TensorRT. -title: '' +name: Report a TensorRT issue +about: The more information you share, the more feedback we can provide. +title: 'XXX failure of TensorRT X.Y when running XXX on GPU XXX' labels: '' assignees: '' @@ -9,36 +9,59 @@ assignees: '' ## Description - + ## Environment -**TensorRT Version**: -**NVIDIA GPU**: -**NVIDIA Driver Version**: -**CUDA Version**: -**CUDNN Version**: -**Operating System**: -**Python Version (if applicable)**: -**Tensorflow Version (if applicable)**: -**PyTorch Version (if applicable)**: -**Baremetal or Container (if so, version)**: + + +**TensorRT Version**: + +**NVIDIA GPU**: + +**NVIDIA Driver Version**: + +**CUDA Version**: + +**CUDNN Version**: + + +Operating System: + +Python Version (if applicable): + +Tensorflow Version (if applicable): + +PyTorch Version (if applicable): + +Baremetal or Container (if so, version): ## Relevant Files +**Model link**: + ## Steps To Reproduce - +**Commands or scripts**: + +**Have you tried [the latest release](https://developer.nvidia.com/tensorrt)?**: + +**Can this model run on other frameworks?** For example run ONNX model with ONNXRuntime (`polygraphy run --onnxrt`): diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e8abf23..1c92f9f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,21 @@ # TensorRT OSS Release Changelog -## [8.6.0 EA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#tensorrt-8) - 2023-03-14 +## [8.6.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-6-1) - 2023-05-02 + +TensorRT OSS release corresponding to TensorRT 8.6.1.6 GA release. +- Updates since [TensorRT 8.6.0 EA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-6-0-EA). +- Please refer to the [TensorRT 8.6.1.6 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-6-1) for more information. + +Key Features and Updates: + +- Added a new flag `--use-cuda-graph` to demoDiffusion to improve performance. 
+- Optimized GPT2 and T5 HuggingFace demos to use fp16 I/O tensors for fp16 networks. + +## [8.6.0 EA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-6-0-EA) - 2023-03-10 TensorRT OSS release corresponding to TensorRT 8.6.0.12 EA release. -- Updates since [TensorRT 8.5.3 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-3). -- Please refer to the [TensorRT 8.6.0.12 EA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#tensorrt-8) for more information. +- Updates since [TensorRT 8.5.3 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-3). +- Please refer to the [TensorRT 8.6.0.12 EA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-6-0-EA) for more information. Key Features and Updates: @@ -12,11 +23,11 @@ Key Features and Updates: - The following plugins have been removed accordingly: GroupNorm, LayerNorm, MultiHeadCrossAttention, MultiHeadFlashAttention, SeqLen2Spatial, and SplitGeLU. - Added a new sample called onnx_custom_plugin. -## [8.5.3 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-3) - 2023-01-30 +## [8.5.3 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-3) - 2023-01-30 TensorRT OSS release corresponding to TensorRT 8.5.3.1 GA release. -- Updates since [TensorRT 8.5.2 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-2). -- Please refer to the [TensorRT 8.5.3 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-3) for more information. +- Updates since [TensorRT 8.5.2 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-2). +- Please refer to the [TensorRT 8.5.3 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-3) for more information. Key Features and Updates: @@ -24,11 +35,11 @@ Key Features and Updates: - Added nvinfer1::plugin namespace - Optimized KV Cache performance for T5 -## [8.5.2 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-2) - 2022-12-12 +## [8.5.2 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-2) - 2022-12-12 TensorRT OSS release corresponding to TensorRT 8.5.2.2 GA release. -- Updates since [TensorRT 8.5.1 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-1). -- Please refer to the [TensorRT 8.5.2 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-2) for more information. +- Updates since [TensorRT 8.5.1 GA release](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-1). +- Please refer to the [TensorRT 8.5.2 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-2) for more information. Key Features and Updates: @@ -51,11 +62,11 @@ Key Features and Updates: ### Removed - None -## [8.5.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-1) - 2022-11-01 +## [8.5.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-1) - 2022-11-01 TensorRT OSS release corresponding to TensorRT 8.5.1.7 GA release. - Updates since [TensorRT 8.4.1 GA release](https://github.com/NVIDIA/TensorRT/releases/tag/8.4.1). 
-- Please refer to the [TensorRT 8.5.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-5-1) for more information. +- Please refer to the [TensorRT 8.5.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-5-1) for more information. Key Features and Updates: @@ -84,7 +95,7 @@ Key Features and Updates: ## [22.08](https://github.com/NVIDIA/TensorRT/releases/tag/22.08) - 2022-08-16 -Updated TensorRT version to 8.4.2 - see the [TensorRT 8.4.2 release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-4-2) for more information +Updated TensorRT version to 8.4.2 - see the [TensorRT 8.4.2 release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-4-2) for more information ### Changed - Updated default protobuf version to 3.20.x @@ -114,11 +125,11 @@ Updated TensorRT version to 8.4.2 - see the [TensorRT 8.4.2 release notes](https ### Removed - None -## [8.4.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-4-1) - 2022-06-14 +## [8.4.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-4-1) - 2022-06-14 TensorRT OSS release corresponding to TensorRT 8.4.1.5 GA release. - Updates since [TensorRT 8.2.1 GA release](https://github.com/NVIDIA/TensorRT/releases/tag/8.2.1). -- Please refer to the [TensorRT 8.4.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-4-1) for more information. +- Please refer to the [TensorRT 8.4.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-4-1) for more information. Key Features and Updates: @@ -258,11 +269,11 @@ Key Features and Updates: ### Removed - Unused source file(s) in demo/BERT -## [8.2.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-2-1) - 2021-11-24 +## [8.2.1 GA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-2-1) - 2021-11-24 TensorRT OSS release corresponding to TensorRT 8.2.1.8 GA release. - Updates since [TensorRT 8.2.0 EA release](https://github.com/NVIDIA/TensorRT/releases/tag/8.2.0-EA). -- Please refer to the [TensorRT 8.2.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-2-1) for more information. +- Please refer to the [TensorRT 8.2.1 GA release notes](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-2-1) for more information. - ONNX parser [v8.2.1](https://github.com/onnx/onnx-tensorrt/releases/tag/release%2F8.2-GA) - Removed duplicate constant layer checks that caused some performance regressions @@ -316,7 +327,7 @@ TensorRT OSS release corresponding to TensorRT 8.2.1.8 GA release. - Updated Python documentation for `add_reduce`, `add_top_k`, and `ISoftMaxLayer` - Renamed default GitHub branch to `main` and updated hyperlinks -## [8.2.0 EA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#rel-8-2-0-EA) - 2021-10-05 +## [8.2.0 EA](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#rel-8-2-0-EA) - 2021-10-05 ### Added - [Demo applications](demo/HuggingFace) showcasing TensorRT inference of [HuggingFace Transformers](https://huggingface.co/transformers). - Support is currently extended to GPT-2 and T5 models. @@ -426,7 +437,7 @@ TensorRT OSS release corresponding to TensorRT 8.2.1.8 GA release. 
## [21.07](https://github.com/NVIDIA/TensorRT/releases/tag/21.07) - 2021-07-21 Identical to the TensorRT-OSS [8.0.1](https://github.com/NVIDIA/TensorRT/releases/tag/8.0.1) Release. -## [8.0.1](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/tensorrt-8.html#tensorrt-8) - 2021-07-02 +## [8.0.1](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/#tensorrt-8) - 2021-07-02 ### Added - Added support for the following ONNX operators: `Celu`, `CumSum`, `EyeLike`, `GatherElements`, `GlobalLpPool`, `GreaterOrEqual`, `LessOrEqual`, `LpNormalization`, `LpPool`, `ReverseSequence`, and `SoftmaxCrossEntropyLoss` [details](). - Rehauled `Resize` ONNX operator, now fully supporting the following modes: diff --git a/CMakeLists.txt b/CMakeLists.txt index 4847eaf5..66f4201b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,7 +48,7 @@ set(CMAKE_SKIP_BUILD_RPATH True) project(TensorRT LANGUAGES CXX CUDA VERSION ${TRT_VERSION} - DESCRIPTION "TensorRT is a C++ library that facilitates high performance inference on NVIDIA GPUs and deep learning accelerators." + DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators." HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT") if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) @@ -88,8 +88,8 @@ endif() ############################################################################################ # Dependencies -set(DEFAULT_CUDA_VERSION 11.3.1) -set(DEFAULT_CUDNN_VERSION 8.2) +set(DEFAULT_CUDA_VERSION 12.0.1) +set(DEFAULT_CUDNN_VERSION 8.8) set(DEFAULT_PROTOBUF_VERSION 3.20.1) # Dependency Version Resolution diff --git a/README.md b/README.md index e3585685..d31f2c4c 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,8 @@ You can skip the **Build** section to enjoy TensorRT with Python. ## Prerequisites To build the TensorRT-OSS components, you will first need the following software packages. -**TensorRT EA build** -* [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download) v8.6.0.12 +**TensorRT GA build** +* [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download) v8.6.1.6 **System Packages** * [CUDA](https://developer.nvidia.com/cuda-toolkit) @@ -48,8 +48,8 @@ To build the TensorRT-OSS components, you will first need the following software * (Cross compilation for Jetson platform) [NVIDIA JetPack](https://developer.nvidia.com/embedded/jetpack) >= 5.0 (current support only for TensorRT 8.4.0 and TensorRT 8.5.2) * (Cross compilation for QNX platform) [QNX Toolchain](https://blackberry.qnx.com/en) * PyPI packages (for demo applications/tests) - * [onnx](https://pypi.org/project/onnx/) 1.9.0 - * [onnxruntime](https://pypi.org/project/onnxruntime/) 1.8.0 + * [onnx](https://pypi.org/project/onnx/) + * [onnxruntime](https://pypi.org/project/onnxruntime/) * [tensorflow-gpu](https://pypi.org/project/tensorflow/) >= 2.5.1 * [Pillow](https://pypi.org/project/Pillow/) >= 9.0.1 * [pycuda](https://pypi.org/project/pycuda/) < 2021.1 @@ -70,18 +70,18 @@ To build the TensorRT-OSS components, you will first need the following software git submodule update --init --recursive ``` -2. #### (Optional - if not using TensorRT container) Specify the TensorRT EA release build path +2. #### (Optional - if not using TensorRT container) Specify the TensorRT GA release build path If using the TensorRT OSS build container, TensorRT libraries are preinstalled under `/usr/lib/x86_64-linux-gnu` and you may skip this step. 
- Else download and extract the TensorRT EA build from [NVIDIA Developer Zone](https://developer.nvidia.com/nvidia-tensorrt-download). + Else download and extract the TensorRT GA build from [NVIDIA Developer Zone](https://developer.nvidia.com/nvidia-tensorrt-download). **Example: Ubuntu 20.04 on x86-64 with cuda-12.0** ```bash cd ~/Downloads - tar -xvzf TensorRT-8.6.0.12.Linux.x86_64-gnu.cuda-12.0.tar.gz - export TRT_LIBPATH=`pwd`/TensorRT-8.6.0.12 + tar -xvzf TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz + export TRT_LIBPATH=`pwd`/TensorRT-8.6.1.6 ``` @@ -111,9 +111,9 @@ For Linux platforms, we recommend that you generate a docker container for build ```bash ./docker/build.sh --file docker/ubuntu-cross-aarch64.Dockerfile --tag tensorrt-jetpack-cuda11.4 ``` - **Example: Ubuntu 20.04 on aarch64 with cuda-11.4.2** + **Example: Ubuntu 20.04 on aarch64 with cuda-11.8** ```bash - ./docker/build.sh --file docker/ubuntu-20.04-aarch64.Dockerfile --tag tensorrt-aarch64-ubuntu20.04-cuda11.4 + ./docker/build.sh --file docker/ubuntu-20.04-aarch64.Dockerfile --tag tensorrt-aarch64-ubuntu20.04-cuda11.8 --cuda 11.8.0 ``` 2. #### Launch the TensorRT-OSS build container. @@ -143,7 +143,7 @@ For Linux platforms, we recommend that you generate a docker container for build yum -y install centos-release-scl yum-config-manager --enable rhel-server-rhscl-7-rpms yum -y install devtoolset-8 - export PATH="/opt/rh/devtoolset-8/root/bin:${PATH} + export PATH="/opt/rh/devtoolset-8/root/bin:${PATH}" ``` **Example: Linux (aarch64) build with default cuda-12.0** @@ -174,14 +174,14 @@ For Linux platforms, we recommend that you generate a docker container for build > NOTE: The latest JetPack SDK v5.1 only supports TensorRT 8.5.2. > NOTE: -
1. The default CUDA version used by CMake is 11.8.0. To override this, for example to 10.2, append `-DCUDA_VERSION=10.2` to the cmake command.
+ 1. The default CUDA version used by CMake is 12.0.1. To override this, for example to 11.8, append `-DCUDA_VERSION=11.8` to the cmake command.
2. If samples fail to link on CentOS7, create this symbolic link: `ln -s $TRT_OUT_DIR/libnvinfer_plugin.so $TRT_OUT_DIR/libnvinfer_plugin.so.8` * Required CMake build arguments are: - `TRT_LIB_DIR`: Path to the TensorRT installation directory containing libraries. - `TRT_OUT_DIR`: Output directory where generated build artifacts will be copied. * Optional CMake build arguments: - `CMAKE_BUILD_TYPE`: Specify if binaries generated are for release or debug (contain debug symbols). Values consists of [`Release`] | `Debug` - - `CUDA_VERISON`: The version of CUDA to target, for example [`11.7.1`]. + - `CUDA_VERSION`: The version of CUDA to target, for example [`11.7.1`]. - `CUDNN_VERSION`: The version of cuDNN to target, for example [`8.6`]. - `PROTOBUF_VERSION`: The version of Protobuf to use, for example [`3.0.0`]. Note: Changing this will not configure CMake to use a system version of Protobuf, it will configure CMake to download and try building that version. - `CMAKE_TOOLCHAIN_FILE`: The path to a toolchain file for cross compilation. diff --git a/VERSION b/VERSION index 2f889e47..811e1c1d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -8.6.0.12 +8.6.1.6 diff --git a/cmake/toolchains/cmake_aarch64-native.toolchain b/cmake/toolchains/cmake_aarch64-native.toolchain index b15f024c..fd4e30cc 100644 --- a/cmake/toolchains/cmake_aarch64-native.toolchain +++ b/cmake/toolchains/cmake_aarch64-native.toolchain @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/CMakeLists.txt b/demo/BERT/CMakeLists.txt index ef5cffbb..cc2c8fc9 100644 --- a/demo/BERT/CMakeLists.txt +++ b/demo/BERT/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/README.md b/demo/BERT/README.md old mode 100644 new mode 100755 index 6f2f6aa5..ff55e1b0 --- a/demo/BERT/README.md +++ b/demo/BERT/README.md @@ -64,7 +64,7 @@ Since the tokenizer and projection of the final predictions are not nearly as co The tokenizer splits the input text into tokens that can be consumed by the model. For details on this process, see [this tutorial](https://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/). -To run the BERT model in TensorRT, we construct the model using TensorRT APIs and import the weights from a pre-trained TensorFlow checkpoint from [NGC](https://ngc.nvidia.com/models/nvidian:bert_tf_v2_large_fp16_128). Finally, a TensorRT engine is generated and serialized to the disk. The various inference scripts then load this engine for inference. +To run the BERT model in TensorRT, we construct the model using TensorRT APIs and import the weights from a pre-trained TensorFlow checkpoint from [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_tf_ckpt_large_qa_squad2_amp_128). Finally, a TensorRT engine is generated and serialized to the disk. The various inference scripts then load this engine for inference. Lastly, the tokens predicted by the model are projected back to the original text to get a final result. 
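As a rough sketch of that last step, the inference scripts deserialize the saved engine with the TensorRT Python API along the following lines (the engine file name here is only a placeholder; the real scripts take it as a command-line argument):

```python
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# Deserialize the engine previously built and serialized to disk by builder.py.
with open("bert_large_seq128.engine", "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())

# The execution context holds per-inference state; input/output buffers are
# bound to it before launching inference with execute_v2 / execute_async_v2.
context = engine.create_execution_context()
```
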
@@ -586,3 +586,4 @@ Results were obtained by running `scripts/inference_benchmark.sh --gpu Ampere` o | 384 | 32 | 40.79 | 40.97 | 40.46 | | 384 | 64 | 78.04 | 78.41 | 77.51 | | 384 | 128 | 151.33 | 151.62 | 150.76 | + diff --git a/demo/BERT/builder.py b/demo/BERT/builder.py index f9b49901..c6d15d00 100755 --- a/demo/BERT/builder.py +++ b/demo/BERT/builder.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/builder_utils.py b/demo/BERT/builder_utils.py index accef397..248bee80 100644 --- a/demo/BERT/builder_utils.py +++ b/demo/BERT/builder_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # limitations under the License. # -import re +import re import pickle import numpy as np @@ -161,7 +161,7 @@ def onnx_to_trt_name(onnx_name): if toks[0] == 'bert': #embeddings or encoder if toks[1] == 'encoder': #transformer # Token conversions for sparse checkpoints - if toks[-2] == 'dense_act': + if toks[-2] == 'dense_act': toks[-2] = 'dense' elif toks[-3] == 'dense_act': if toks[-2] == 'input_quantizer': @@ -187,7 +187,7 @@ def onnx_to_trt_name(onnx_name): toks[-2] = 'kernel' elif toks[-2] == 'input_quantizer': toks[-2] = 'input' - + if 'final_input_quantizer' not in toks[2]: ind = toks.index('layers')+1 if 'layers' in toks else 3 toks = toks[ind:] @@ -229,14 +229,14 @@ def get_onnx_weight_dict(tensor_dict, config): Bqkv[0,:] = tensor Bqkv[1,:] = tensor_dict[prefix + BK] Bqkv[2,:] = tensor_dict[prefix + BV] - + if config.use_int8 and getattr(config, 'interleaved', False): Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H))) Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H))) else: Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)).transpose((1,0,2,3,4))) Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)).transpose((1,0,2))) - + weights_dict[prefix + WQKV] = trt.Weights(Wqkv) weights_dict[prefix + BQKV] = trt.Weights(Bqkv) weights_dict[prefix + WQKV + "_notrans"] = trt.Weights(np.ascontiguousarray(Wqkv.T)) @@ -261,7 +261,7 @@ def load_onnx_weights_and_quant(path, config): model = onnx.load(path) weights = model.graph.initializer tensor_dict = dict((onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.int8).reshape(w.dims)) - if w.name.split('_')[-1] == 'mask' else + if w.name.split('_')[-1] == 'mask' else (onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.float32).reshape(w.dims)) for w in weights) return get_onnx_weight_dict(tensor_dict, config) diff --git a/demo/BERT/builder_varseqlen.py b/demo/BERT/builder_varseqlen.py index b9afa5e8..0c1aeaac 100755 --- a/demo/BERT/builder_varseqlen.py +++ b/demo/BERT/builder_varseqlen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -385,11 +385,11 @@ def emb_layernorm(builder, network, config, weights_dict, builder_config, max_se if config.use_int8 and config.use_qat: dr_input = weights_dict['l0_attention_self_query_input_amax'] set_output_range(emb_layer, dr_input, out_idx=0) - + if config.use_megatron: dr_skln1_res_in = weights_dict['l0_attention_output_add_residual_input_quantizer_amax'] set_output_range(emb_layer, dr_skln1_res_in, out_idx=1) - + set_output_name(emb_layer, "embeddings_", "output") return emb_layer, cu_seqlens, max_seqlen @@ -409,7 +409,7 @@ def build_engine(batch_sizes, workspace_size, sequence_length, config, weights_d if verbose: builder_config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED - # speed up the engine build for trt major version >= 8 + # speed up the engine build for trt major version >= 8 # 1. disable cudnn tactic # 2. load global timing cache if trt_version[0] >= 8: @@ -438,7 +438,7 @@ def build_engine(batch_sizes, workspace_size, sequence_length, config, weights_d mask_idx = None else: mask_idx = emb_layer.get_output(1) - + if config.use_megatron: # megatron currently only supports int8 and interleaved shuffler = network.add_shuffle(emb_layer.get_output(1)) shuffler.second_transpose = (2, 1, 0, 3) @@ -503,8 +503,8 @@ def main(): cc = pycuda.autoinit.device.compute_capability() if cc[0] * 10 + cc[1] < 72: raise RuntimeError("This variable-length BERT demo only support Xavier+ GPU.") - - if args.megatron: + + if args.megatron: if not (args.interleaved and args.int8): raise RuntimeError("Megatron BERT currently only supports int8 and interleaved.") if not args.pickle: diff --git a/demo/BERT/helpers/calibrator.py b/demo/BERT/helpers/calibrator.py index 1ebf0793..beacc625 100644 --- a/demo/BERT/helpers/calibrator.py +++ b/demo/BERT/helpers/calibrator.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/helpers/data_processing.py b/demo/BERT/helpers/data_processing.py index dd7d1733..88459ebf 100644 --- a/demo/BERT/helpers/data_processing.py +++ b/demo/BERT/helpers/data_processing.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/helpers/tokenization.py b/demo/BERT/helpers/tokenization.py index 41186ea7..434f411d 100644 --- a/demo/BERT/helpers/tokenization.py +++ b/demo/BERT/helpers/tokenization.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/bert_infer.h b/demo/BERT/infer_c/bert_infer.h index db1e1352..827f9ba9 100644 --- a/demo/BERT/infer_c/bert_infer.h +++ b/demo/BERT/infer_c/bert_infer.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/common.h b/demo/BERT/infer_c/common.h index b20d993a..b5280e2a 100644 --- a/demo/BERT/infer_c/common.h +++ b/demo/BERT/infer_c/common.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/infer_c.cpp b/demo/BERT/infer_c/infer_c.cpp index 7ccc1c8f..b868a661 100644 --- a/demo/BERT/infer_c/infer_c.cpp +++ b/demo/BERT/infer_c/infer_c.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/logging.cpp b/demo/BERT/infer_c/logging.cpp index 748a8b3b..b6b14298 100644 --- a/demo/BERT/infer_c/logging.cpp +++ b/demo/BERT/infer_c/logging.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/logging.h b/demo/BERT/infer_c/logging.h index 0fad4642..2c36d039 100644 --- a/demo/BERT/infer_c/logging.h +++ b/demo/BERT/infer_c/logging.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/infer_c/perf.cpp b/demo/BERT/infer_c/perf.cpp index 365006f7..bbc6de76 100644 --- a/demo/BERT/infer_c/perf.cpp +++ b/demo/BERT/infer_c/perf.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/inference.py b/demo/BERT/inference.py index e4742d0b..2116de8f 100644 --- a/demo/BERT/inference.py +++ b/demo/BERT/inference.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/inference_c.py b/demo/BERT/inference_c.py index 3022b3dc..e2bda9af 100644 --- a/demo/BERT/inference_c.py +++ b/demo/BERT/inference_c.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/inference_varseqlen.py b/demo/BERT/inference_varseqlen.py index e5205f38..9cd08519 100644 --- a/demo/BERT/inference_varseqlen.py +++ b/demo/BERT/inference_varseqlen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/perf.py b/demo/BERT/perf.py index 541337e4..5943b41b 100644 --- a/demo/BERT/perf.py +++ b/demo/BERT/perf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/perf_varseqlen.py b/demo/BERT/perf_varseqlen.py index 48af2ce1..a1680797 100644 --- a/demo/BERT/perf_varseqlen.py +++ b/demo/BERT/perf_varseqlen.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/squad/evaluate-v1.1.py b/demo/BERT/squad/evaluate-v1.1.py index d2c5ecc9..c73db423 100644 --- a/demo/BERT/squad/evaluate-v1.1.py +++ b/demo/BERT/squad/evaluate-v1.1.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/BERT/squad/evaluate-v2.0.py b/demo/BERT/squad/evaluate-v2.0.py index 4ec1b887..e36d3e9f 100644 --- a/demo/BERT/squad/evaluate-v2.0.py +++ b/demo/BERT/squad/evaluate-v2.0.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -183,7 +183,7 @@ def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, plot_pr_curve(precisions, recalls, out_image, title) return {'ap': 100.0 * avg_prec} -def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, +def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) @@ -277,7 +277,7 @@ def main(): if OPTS.na_prob_file: find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) if OPTS.na_prob_file and OPTS.out_image_dir: - run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, + run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns') histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns') @@ -292,5 +292,5 @@ def main(): if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') - import matplotlib.pyplot as plt + import matplotlib.pyplot as plt main() diff --git a/demo/DeBERTa/deberta_onnx_modify.py b/demo/DeBERTa/deberta_onnx_modify.py index 4c61dd6e..234c4659 100644 --- a/demo/DeBERTa/deberta_onnx_modify.py +++ b/demo/DeBERTa/deberta_onnx_modify.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -52,7 +52,7 @@ if args.output is None: model_output = os.path.splitext(model_input)[0] + ("_plugin" if use_plugin else "_original") + os.path.splitext(model_input)[-1] else: - model_output = args.output + model_output = args.output def remove_uint8_cast(graph): ''' @@ -72,7 +72,7 @@ def remove_uint8_cast(graph): # node.attrs["to"] = TensorProto.INT64 return graph - + @gs.Graph.register() def insert_disentangled_attention(self, inputs, outputs, factor, span): ''' @@ -80,7 +80,7 @@ def insert_disentangled_attention(self, inputs, outputs, factor, span): inputs: list of plugin inputs outputs: list of plugin outputs - factor: scaling factor of disentangled attention, sqrt(3d), converted from a division factor to a multiplying factor + factor: scaling factor of disentangled attention, sqrt(3d), converted from a division factor to a multiplying factor span: relative distance span, k ''' # disconnect previous output from flow (the previous subgraph still exists but is effectively dead since it has no link to an output tensor, and thus will be cleaned up) @@ -102,21 +102,21 @@ def insert_disentangled_attention_all(graph): layers = [(nodes[2*i+0], nodes[2*i+1]) for i in range(len(nodes)//2)] # 2 gatherelements in 1 layer for l, (left,right) in enumerate(layers): print(f"Fusing layer {l}") - + # CAVEAT! MUST cast to list() when setting the inputs & outputs. graphsurgeon's default for X.inputs and X.outputs is `onnx_graphsurgeon.util.misc.SynchronizedList`, i.e. 2-way node-tensor updating mechanism. If not cast, when we remove the input nodes of a tensor, the tensor itself will be removed as well... 
- + # inputs: (data0, data1, data2), input tensors for c2c add and 2 gathers inputs = list(left.o().o().o().o().i().inputs)[0:1] + list(left.inputs)[0:1] + list(right.inputs)[0:1] - + # outputs: (result), output tensors after adding 3 gather results outputs = list(left.o().o().o().o().outputs) - + # constants: scaling factor, relative distance span factor = left.o().inputs[1].inputs[0].attrs["value"].values.item() span = right.i(1,0).i().i().i().inputs[1].inputs[0].attrs["value"].values.item() - # insert plugin layer - graph.insert_disentangled_attention(inputs, outputs, factor, span) + # insert plugin layer + graph.insert_disentangled_attention(inputs, outputs, factor, span) return graph @@ -142,7 +142,7 @@ def correctness_check_models(graph): end_node.outputs[0].dtype = graph_raw.outputs[0].dtype # need to explicitly specify dtype and shape of graph output tensor end_node.outputs[0].shape = ['batch_size*num_heads', seq_len, seq_len] original_output_all.append(end_node.outputs[0]) - + graph_raw.outputs = graph_raw.outputs + original_output_all # add plugin outputs to graph output ## for modified graph with plugin @@ -165,13 +165,13 @@ def correctness_check_models(graph): factor = left.o().inputs[1].inputs[0].attrs["value"].values.item() span = right.i(1,0).i().i().i().inputs[1].inputs[0].attrs["value"].values.item() - # insert plugin layer - graph.insert_disentangled_attention(inputs, outputs, factor, span) + # insert plugin layer + graph.insert_disentangled_attention(inputs, outputs, factor, span) graph.outputs = graph.outputs + plugin_output_all # add plugin outputs to graph output return graph_raw, graph - + def check_model(model_name): # Load the ONNX model model = onnx.load(model_name) @@ -200,7 +200,7 @@ def check_model(model_name): # don't check model because 'DisentangledAttention_TRT' is not a registered op -elif correctness_check: +elif correctness_check: # correctness check, save two models (original and w/ plugin) with intermediate output nodes inserted graph_raw, graph = correctness_check_models(graph) @@ -213,7 +213,7 @@ def check_model(model_name): model_output2 = os.path.splitext(model_input)[0] + "_correctness_check_plugin" + os.path.splitext(model_input)[-1] onnx.save_model(gs.export_onnx(graph_raw), model_output1) onnx.save_model(gs.export_onnx(graph), model_output2) - + print(f"Saving models for correctness check to {model_output1} (original) and {model_output2} (with plugin)") check_model(model_output1) diff --git a/demo/DeBERTa/deberta_ort_inference.py b/demo/DeBERTa/deberta_ort_inference.py index 24bf966c..17378989 100644 --- a/demo/DeBERTa/deberta_ort_inference.py +++ b/demo/DeBERTa/deberta_ort_inference.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,21 +18,21 @@ """ Test ORT-TRT engine of DeBERTa model. Different precisions are supported. -Usage: +Usage: Test model inference time: - python deberta_ort_inference.py --onnx=./test/deberta.onnx --test fp16 Correctness check by comparing original model and model with plugin: - python deberta_ort_inference.py --onnx=./test/deberta --correctness-check fp16 -Notes: +Notes: - supported precisions are fp32/fp16. For test, you can specify more than one precisions, and TensorRT engine of each precision will be built sequentially. 
- - engine files are saved at `./engine_cache/[Model name]_[GPU name]_[Precision]/`. Note that TensorRT engine is specific to both GPU architecture and TensorRT version. + - engine files are saved at `./engine_cache/[Model name]_[GPU name]_[Precision]/`. Note that TensorRT engine is specific to both GPU architecture and TensorRT version. - if in --correctness-check mode, the argument for --onnx is the stem name for the model without .onnx extension. """ -import os, argparse -import onnxruntime as ort +import os, argparse +import onnxruntime as ort import numpy as np import torch from time import time @@ -44,10 +44,10 @@ def GPU_ABBREV(name): ''' Map GPU device query name to abbreviation. - + ::param str name Device name from torch.cuda.get_device_name(). ::return str GPU abbreviation. - ''' + ''' GPU_LIST = [ 'V100', @@ -56,13 +56,13 @@ def GPU_ABBREV(name): 'A100', 'A10G', 'A10' - ] + ] # Partial list, can be extended. The order of A100, A10G, A10 matters. They're put in a way to not detect substring A10 as A100 - + for i in GPU_LIST: if i in name: - return i - + return i + return 'GPU' # for names not in the partial list, use 'GPU' as default gpu_name = GPU_ABBREV(torch.cuda.get_device_name()) @@ -79,7 +79,7 @@ def GPU_ABBREV(name): args = parser.parse_args() -ONNX_MODEL = args.onnx +ONNX_MODEL = args.onnx MODEL_STEM = os.path.splitext(args.onnx)[0].split('/')[-1] TEST = args.test CORRECTNESS = args.correctness_check @@ -96,8 +96,8 @@ def GPU_ABBREV(name): def test_engine(): for precision in TEST: - - engine_cachepath = '/'.join([ENGINE_PATH, '_'.join([MODEL_STEM, gpu_name, precision, 'ort'])]) + + engine_cachepath = '/'.join([ENGINE_PATH, '_'.join([MODEL_STEM, gpu_name, precision, 'ort'])]) providers = [ ('TensorrtExecutionProvider', { @@ -110,7 +110,7 @@ def test_engine(): so = ort.SessionOptions() - sess = ort.InferenceSession(ONNX_MODEL, sess_options=so, providers=providers) + sess = ort.InferenceSession(ONNX_MODEL, sess_options=so, providers=providers) print(f'Running inference on engine {engine_cachepath}') @@ -121,7 +121,7 @@ def test_engine(): input_ids = torch.randint(0, vocab, (batch_size, seq_len), dtype=torch.long) attention_mask = torch.randint(0, 2, (batch_size, seq_len), dtype=torch.long) inputs = { - 'input_ids': input_ids.numpy(), + 'input_ids': input_ids.numpy(), 'attention_mask': attention_mask.numpy() } @@ -137,7 +137,7 @@ def test_engine(): print(f'Average Inference time (ms) of {nreps} runs: {duration/nreps*1000:.3f}. For more accurate test, please use the onnxruntime_perf_test commands.') def correctness_check_engines(): - + for precision in CORRECTNESS: engine_cachepath1 = '/'.join([ENGINE_PATH, '_'.join([MODEL_STEM, 'original', gpu_name, precision, 'ort'])]) @@ -145,7 +145,7 @@ def correctness_check_engines(): if not os.path.exists(engine_cachepath1) or not os.path.exists(engine_cachepath2): print('At least one of the original and/or plugin engines do not exist. 
Please build them first by --test') - return + return print(f'Running inference on original engine {engine_cachepath1} and plugin engine {engine_cachepath2}') @@ -158,7 +158,7 @@ def correctness_check_engines(): 'trt_engine_cache_enable': True, 'trt_engine_cache_path': engine_cachepath1 }), - 'CUDAExecutionProvider'] + 'CUDAExecutionProvider'] providers2 = [ ('TensorrtExecutionProvider', { @@ -167,7 +167,7 @@ def correctness_check_engines(): 'trt_engine_cache_enable': True, 'trt_engine_cache_path': engine_cachepath2 }), - 'CUDAExecutionProvider'] + 'CUDAExecutionProvider'] sess1 = ort.InferenceSession(ONNX_MODEL+'_original.onnx', sess_options=so, providers=providers1) sess2 = ort.InferenceSession(ONNX_MODEL+'_plugin.onnx', sess_options=so, providers=providers2) @@ -179,7 +179,7 @@ def correctness_check_engines(): input_ids = torch.randint(0, vocab, (batch_size, seq_len), dtype=torch.long) attention_mask = torch.randint(0, 2, (batch_size, seq_len), dtype=torch.long) inputs = { - 'input_ids': input_ids.numpy(), + 'input_ids': input_ids.numpy(), 'attention_mask': attention_mask.numpy() } diff --git a/demo/DeBERTa/deberta_pytorch2onnx.py b/demo/DeBERTa/deberta_pytorch2onnx.py index 74c3ccbb..51546b29 100644 --- a/demo/DeBERTa/deberta_pytorch2onnx.py +++ b/demo/DeBERTa/deberta_pytorch2onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ python deberta_pytorch2onnx.py [--filename xx.onnx] [--variant microsoft/deberta-xx] [--seq-len xx] ''' -import os, time, argparse -from transformers import DebertaV2Tokenizer, DebertaV2Config, DebertaV2ForSequenceClassification +import os, time, argparse +from transformers import DebertaV2Tokenizer, DebertaV2Config, DebertaV2ForSequenceClassification # DEBERTA V2 implementation, https://github.com/huggingface/transformers/blob/master/src/transformers/models/deberta_v2/modeling_deberta_v2.py import torch, onnxruntime as ort, numpy as np @@ -34,14 +34,14 @@ args = parser.parse_args() onnx_filename = args.filename -model_variant = args.variant +model_variant = args.variant sequence_length = args.seq_len assert not args.variant or (args.variant and not args.seq_len), "--variant and --seq-len cannot be used together!" assert torch.cuda.is_available(), "CUDA not available!" 
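The ONNX Runtime TensorRT execution provider configuration used throughout `deberta_ort_inference.py` boils down to a small, reusable pattern; a minimal sketch, with the model path and engine-cache directory as placeholders and fp16 shown as an example:

```python
import onnxruntime as ort

providers = [
    ("TensorrtExecutionProvider", {
        "trt_fp16_enable": True,          # build the cached engine in fp16
        "trt_engine_cache_enable": True,  # cache engines so later runs skip the build
        "trt_engine_cache_path": "./engine_cache/deberta_fp16_ort",
    }),
    "CUDAExecutionProvider",              # fallback for nodes TensorRT cannot run
]

sess = ort.InferenceSession("deberta.onnx", sess_options=ort.SessionOptions(), providers=providers)
```
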
def randomize_model(model): - for module_ in model.named_modules(): + for module_ in model.named_modules(): if isinstance(module_[1],(torch.nn.Linear, torch.nn.Embedding)): module_[1].weight.data.normal_(mean=0.0, std=model.config.initializer_range) elif isinstance(module_[1], torch.nn.LayerNorm): @@ -55,7 +55,7 @@ def export(): parent_dir = os.path.dirname(onnx_filename) if not os.path.exists(parent_dir): os.makedirs(parent_dir) - + if model_variant is None: # default model hyper-params batch_size = 1 @@ -71,7 +71,7 @@ def export(): relative_attention=True max_relative_positions = 256 # k pos_att_type = ["p2c", "c2p"] - + deberta_config = DebertaV2Config(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=layers, num_attention_heads=heads, intermediate_size=intermediate_size, type_vocab_size=type_vocab_size, max_position_embeddings=max_position_embeddings, relative_attention=relative_attention, max_relative_positions=max_relative_positions, pos_att_type=pos_att_type) deberta_model = DebertaV2ForSequenceClassification(deberta_config) deberta_model = randomize_model(deberta_model) @@ -82,21 +82,21 @@ def export(): batch_size = 1 seq_len = deberta_config.max_position_embeddings vocab_size = deberta_config.vocab_size - + deberta_model.cuda().eval() # input/output gpu = torch.device('cuda') input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), dtype=torch.long, device=gpu) attention_mask = torch.randint(0, 2, (batch_size, seq_len), dtype=torch.long, device=gpu) - input_names = ['input_ids', 'attention_mask'] + input_names = ['input_ids', 'attention_mask'] output_names = ['output'] - dynamic_axes={'input_ids' : {0 : 'batch_size'}, - 'attention_mask' : {0 : 'batch_size'}, + dynamic_axes={'input_ids' : {0 : 'batch_size'}, + 'attention_mask' : {0 : 'batch_size'}, 'output' : {0 : 'batch_size'}} - + # ONNX export - torch.onnx.export(deberta_model, # model + torch.onnx.export(deberta_model, # model (input_ids, attention_mask), # model inputs onnx_filename, export_params=True, @@ -105,7 +105,7 @@ def export(): input_names = input_names, output_names = output_names, dynamic_axes = dynamic_axes) - + # full precision inference num_trials = 10 @@ -115,7 +115,7 @@ def export(): end = time.time() print("Average PyTorch FP32(TF32) time: {:.2f} ms".format((end - start)/num_trials*1000)) - + # half precision inference (do this after onnx export, otherwise the export ONNX model is with FP16 weights...) deberta_model_fp16 = deberta_model.half() start = time.time() diff --git a/demo/DeBERTa/deberta_tensorrt_inference.py b/demo/DeBERTa/deberta_tensorrt_inference.py index b17806a8..6a579a1c 100644 --- a/demo/DeBERTa/deberta_tensorrt_inference.py +++ b/demo/DeBERTa/deberta_tensorrt_inference.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ """ Build and test TensorRT engines generated from the DeBERTa model. Different precisions are supported. 
-Usage: +Usage: Build and test a model: - build: python deberta_tensorrt_inference.py --onnx=xx.onnx --build fp16 # build TRT engines - test: python deberta_tensorrt_inference.py --onnx=xx.onnx --test fp16 # test will measure the inference time @@ -30,15 +30,15 @@ - [3] build plugin model: python deberta_tensorrt_inference.py --onnx=xx_correctness_check_plugin.onnx --build fp16 - [4] correctness check: python deberta_tensorrt_inference.py --onnx=deberta --correctness_check fp16 -Notes: +Notes: - supported precisions are fp32/tf32/fp16. For both --build and --test, you can specify more than one precisions, and TensorRT engines of each precision will be built sequentially. - - engine files are saved as `**/[Model name]_[GPU name]_[Precision].engine`. Note that TensorRT engines are specific to both GPU architecture and TensorRT version, and therefore are not compatible cross-version nor cross-device. + - engine files are saved as `**/[Model name]_[GPU name]_[Precision].engine`. Note that TensorRT engines are specific to both GPU architecture and TensorRT version, and therefore are not compatible cross-version nor cross-device. - in --correctness-check mode, the argument for --onnx is the `root` name for the models [root]_correctness_check_original/plugin.onnx """ import torch import tensorrt as trt -import os, sys, argparse +import os, sys, argparse import numpy as np import pycuda.driver as cuda import pycuda.autoinit # without this, "LogicError: explicit_context_dependent failed: invalid device context - no currently active context?" @@ -49,10 +49,10 @@ def GPU_ABBREV(name): ''' Map GPU device query name to abbreviation. - + ::param str name Device name from torch.cuda.get_device_name(). ::return str GPU abbreviation. - ''' + ''' GPU_LIST = [ 'V100', @@ -61,13 +61,13 @@ def GPU_ABBREV(name): 'A100', 'A10G', 'A10' - ] + ] # Partial list, can be extended. The order of A100, A10G, A10 matters. They're put in a way to not detect substring A10 as A100 - + for i in GPU_LIST: if i in name: - return i - + return i + return 'GPU' # for names not in the partial list, use 'GPU' as default gpu_name = GPU_ABBREV(torch.cuda.get_device_name()) @@ -86,7 +86,7 @@ def GPU_ABBREV(name): args = parser.parse_args() -ONNX_MODEL = args.onnx +ONNX_MODEL = args.onnx MODEL_NAME = os.path.splitext(args.onnx)[0] BUILD = args.build TEST = args.test @@ -127,7 +127,7 @@ def __repr__(self): return self.__str__() def __init__(self, engine_path): - self.engine_path = engine_path + self.engine_path = engine_path self.logger = trt.Logger(trt.Logger.WARNING) self.runtime = trt.Runtime(self.logger) @@ -159,7 +159,7 @@ def load_engine(self): with open(self.engine_path, 'rb') as f: engine = self.runtime.deserialize_cuda_engine(f.read()) return engine - + def allocate_buffers(self, engine): ''' Allocates all buffers required for an engine, i.e. host/device inputs/outputs. @@ -172,7 +172,7 @@ def allocate_buffers(self, engine): for binding in engine: # binding is the name of input/output size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size dtype = trt.nptype(engine.get_binding_dtype(binding)) - + # Allocate host and device buffers host_mem = cuda.pagelocked_empty(size, dtype) # page-locked memory buffer (won't swapped to disk) device_mem = cuda.mem_alloc(host_mem.nbytes) @@ -202,7 +202,7 @@ def __call__(self, model_inputs: list, timing=False): TORCH = True else: assert False, 'Unsupported input data format!' 
- + # batch size consistency check if NUMPY: batch_size = np.unique(np.array([i.shape[0] for i in model_inputs])) @@ -226,18 +226,18 @@ def __call__(self, model_inputs: list, timing=False): np.copyto(self.inputs[i].host, model_input.ravel()) elif TORCH: if timing: - cuda.memcpy_dtod(self.inputs[i].device, model_input.data_ptr(), model_input.element_size() * model_input.nelement()) + cuda.memcpy_dtod(self.inputs[i].device, model_input.data_ptr(), model_input.element_size() * model_input.nelement()) else: # for Torch GPU tensor it's easier, can just do Device to Device copy cuda.memcpy_dtod_async(self.inputs[i].device, model_input.data_ptr(), model_input.element_size() * model_input.nelement(), self.stream) # dtod need size in bytes - if NUMPY: + if NUMPY: if timing: [cuda.memcpy_htod(inp.device, inp.host) for inp in self.inputs] else: # input, Host to Device [cuda.memcpy_htod_async(inp.device, inp.host, self.stream) for inp in self.inputs] - + duration = 0 if timing: start_time = time() @@ -258,7 +258,7 @@ def __call__(self, model_inputs: list, timing=False): # synchronize to ensure completion of async calls self.stream.synchronize() - if NUMPY: + if NUMPY: return [out.host.reshape(batch_size,-1) for out in self.outputs], duration elif TORCH: return [torch.from_numpy(out.host.reshape(batch_size,-1)) for out in self.outputs], duration @@ -284,19 +284,19 @@ def build_engine(): print(onnx_parser.get_error(idx)) if not parse_success: sys.exit('ONNX model parsing failed') - + ## build TRT engine (configuration options at: https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/infer/Core/BuilderConfig.html#ibuilderconfig) config = TRT_BUILDER.create_builder_config() - + seq_len = network.get_input(0).shape[1] - + # handle dynamic shape (min/opt/max): https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes # by default batch dim set as 1 for all min/opt/max. 
If there are batch need, change the value for opt and max accordingly - profile = TRT_BUILDER.create_optimization_profile() - profile.set_shape("input_ids", (1,seq_len), (1,seq_len), (1,seq_len)) - profile.set_shape("attention_mask", (1,seq_len), (1,seq_len), (1,seq_len)) + profile = TRT_BUILDER.create_optimization_profile() + profile.set_shape("input_ids", (1,seq_len), (1,seq_len), (1,seq_len)) + profile.set_shape("attention_mask", (1,seq_len), (1,seq_len), (1,seq_len)) config.add_optimization_profile(profile) - + if TRT_VERSION >= 84: config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 4096 * (1 << 20)) # 4096 MiB, syntax after TRT 8.4 else: @@ -312,7 +312,7 @@ def build_engine(): # build serialized_engine = TRT_BUILDER.build_serialized_network(network, config) - + ## save TRT engine with open(engine_filename, 'wb') as f: f.write(serialized_engine) @@ -326,7 +326,7 @@ def test_engine(): print(f'Running inference on engine {engine_filename}') model = TRTModel(engine_filename) - + ## psuedo-random input test batch_size = 1 seq_len = model.engine.get_binding_shape(0)[1] @@ -343,7 +343,7 @@ def test_engine(): for _ in range(nreps): outputs, duration = model(inputs, timing=True) duration_total += duration - + print(f'Average Inference time (ms) of {nreps} runs: {duration_total/nreps*1000:.3f}') def correctness_check_engines(): @@ -351,7 +351,7 @@ def correctness_check_engines(): ## load and deserialize TRT engine engine_filename1 = '_'.join([ONNX_MODEL, 'correctness_check_original', gpu_name, precision]) + '.engine' engine_filename2 = '_'.join([ONNX_MODEL, 'correctness_check_plugin', gpu_name, precision]) + '.engine' - + assert os.path.exists(engine_filename1), f'Engine file {engine_filename1} does not exist. Please build the engine first by --build' assert os.path.exists(engine_filename2), f'Engine file {engine_filename2} does not exist. Please build the engine first by --build' @@ -359,7 +359,7 @@ def correctness_check_engines(): model1 = TRTModel(engine_filename1) model2 = TRTModel(engine_filename2) - + ## psuedo-random input test batch_size = 1 seq_len = model1.engine.get_binding_shape(0)[1] @@ -369,7 +369,7 @@ def correctness_check_engines(): input_ids = torch.randint(0, vocab, (batch_size, seq_len), dtype=torch.long, device=gpu) attention_mask = torch.randint(0, 2, (batch_size, seq_len), dtype=torch.long, device=gpu) inputs = [input_ids, attention_mask] - + outputs1, _ = model1(inputs) outputs2, _ = model2(inputs) diff --git a/demo/DeBERTa/requirements.txt b/demo/DeBERTa/requirements.txt index 5046d50c..59b63433 100644 --- a/demo/DeBERTa/requirements.txt +++ b/demo/DeBERTa/requirements.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Diffusion/README.md b/demo/Diffusion/README.md index ad9aa0d2..daaeeb09 100644 --- a/demo/Diffusion/README.md +++ b/demo/Diffusion/README.md @@ -93,7 +93,10 @@ Use `--input-image=` to specify your image. 
Otherwise the example ### Generate an inpainted image guided by an image, mask and single text prompt ```bash -python3 demo_inpaint.py "a mecha robot sitting on a bench" --hf-token=$HF_TOKEN -v +# Create separate onnx/engine directories when switching versions +mkdir -p onnx-1.5 engine-1.5 + +python3 demo_inpaint.py "a mecha robot sitting on a bench" --hf-token=$HF_TOKEN --version=1.5 --onnx-dir=onnx-1.5 --engine-dir=engine-1.5 -v ``` Use `--input-image=` and `--mask-image=` to specify your inputs. They must have the same dimensions. Otherwise the example image and mask will be downloaded from the Internet. @@ -102,4 +105,4 @@ Use `--input-image=` and `--mask-image=` to specify - One can set schdeuler using `--scheduler=EulerA`. Note that some schedulers are not available for some pipelines or version. - To accelerate engine building time one can use `--timing-cache=`. This cache file will be created if does not exist. Note, that it may influence the performance if the cache file created on the other hardware is used. It is suggested to use this flag only during development. To achieve the best perfromance during deployment, please, build engines without timing cache. - To switch between versions or pipelines one needs either to clear onnx and engine dirs, or to specify `--force-onnx-export --force-onnx-optimize --force-engine-build` or to create new dirs and to specify `--onnx-dir= --engine-dir=`. - +- Inference performance can be improved by enabling [CUDA graphs](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#cuda-graphs) using `--use-cuda-graph`. Enabling CUDA graphs requires fixed input shapes, so this flag must be combined with `--build-static-batch` and cannot be combined with `--build-dynamic-shape`. diff --git a/demo/Diffusion/demo_img2img.py b/demo/Diffusion/demo_img2img.py index c15c099b..963babee 100755 --- a/demo/Diffusion/demo_img2img.py +++ b/demo/Diffusion/demo_img2img.py @@ -74,6 +74,9 @@ def parseArgs(): if batch_size > max_batch_size: raise ValueError(f"Batch size {len(prompt)} is larger than allowed {max_batch_size}. If dynamic shape is used, then maximum batch size is 4") + if args.use_cuda_graph and (not args.build_static_batch or args.build_dynamic_shape): + raise ValueError(f"Using CUDA graph requires static dimensions. Enable `--build-static-batch` and do not specify `--build-dynamic-shape`") + # Initialize demo demo = Img2ImgPipeline( scheduler=args.scheduler, @@ -95,6 +98,10 @@ def parseArgs(): timing_cache=args.timing_cache, onnx_refit_dir=args.onnx_refit_dir) demo.loadResources(image_height, image_width, batch_size, args.seed) + if args.use_cuda_graph: + # inference once to get cuda graph + images = demo.infer(prompt, negative_prompt, input_image, image_height, image_width, strength=0.75, warmup=True) + print("[I] Warming up ..") for _ in range(args.num_warmup_runs): images = demo.infer(prompt, negative_prompt, input_image, image_height, image_width, strength=0.75, warmup=True) diff --git a/demo/Diffusion/demo_inpaint.py b/demo/Diffusion/demo_inpaint.py index 27eedf2b..1fa8219a 100755 --- a/demo/Diffusion/demo_inpaint.py +++ b/demo/Diffusion/demo_inpaint.py @@ -34,7 +34,7 @@ def parseArgs(): print("[I] Initializing StableDiffusion inpainting demo using TensorRT") args = parseArgs() - # Inpainting is currently only supported for v1.5 and v2.1 + # Inpainting is currently only supported for v1.5 and v2.0 if args.version not in ("1.5", "2.0"): raise ValueError(f"Inpainting not supported in version {args.version}. 
Use v2.0, or v1.5") @@ -84,6 +84,9 @@ def parseArgs(): if batch_size > max_batch_size: raise ValueError(f"Batch size {len(prompt)} is larger than allowed {max_batch_size}. If dynamic shape is used, then maximum batch size is 4") + if args.use_cuda_graph and (not args.build_static_batch or args.build_dynamic_shape): + raise ValueError(f"Using CUDA graph requires static dimensions. Enable `--build-static-batch` and do not specify `--build-dynamic-shape`") + # Initialize demo demo = InpaintPipeline( scheduler=args.scheduler, @@ -105,6 +108,11 @@ def parseArgs(): timing_cache=args.timing_cache) demo.loadResources(image_height, image_width, batch_size, args.seed) + + if args.use_cuda_graph: + # inference once to get cuda graph + images = demo.infer(prompt, negative_prompt, input_image, mask_image, image_height, image_width, strength=0.75, warmup=True) + print("[I] Warming up ..") for _ in range(args.num_warmup_runs): images = demo.infer(prompt, negative_prompt, input_image, mask_image, image_height, image_width, strength=0.75, warmup=True) diff --git a/demo/Diffusion/demo_txt2img.py b/demo/Diffusion/demo_txt2img.py index 219e4792..4491c45e 100644 --- a/demo/Diffusion/demo_txt2img.py +++ b/demo/Diffusion/demo_txt2img.py @@ -61,6 +61,9 @@ def parseArgs(): if batch_size > max_batch_size: raise ValueError(f"Batch size {len(prompt)} is larger than allowed {max_batch_size}. If dynamic shape is used, then maximum batch size is 4") + if args.use_cuda_graph and (not args.build_static_batch or args.build_dynamic_shape): + raise ValueError(f"Using CUDA graph requires static dimensions. Enable `--build-static-batch` and do not specify `--build-dynamic-shape`") + # Initialize demo demo = Txt2ImgPipeline( scheduler=args.scheduler, @@ -70,7 +73,8 @@ def parseArgs(): hf_token=args.hf_token, verbose=args.verbose, nvtx_profile=args.nvtx_profile, - max_batch_size=max_batch_size) + max_batch_size=max_batch_size, + use_cuda_graph=args.use_cuda_graph) # Load TensorRT engines and pytorch modules demo.loadEngines(args.engine_dir, args.onnx_dir, args.onnx_opset, @@ -82,6 +86,10 @@ def parseArgs(): timing_cache=args.timing_cache, onnx_refit_dir=args.onnx_refit_dir) demo.loadResources(image_height, image_width, batch_size, args.seed) + if args.use_cuda_graph: + # inference once to get cuda graph + images = demo.infer(prompt, negative_prompt, image_height, image_width, warmup=True, verbose=False) + print("[I] Warming up ..") for _ in range(args.num_warmup_runs): images = demo.infer(prompt, negative_prompt, image_height, image_width, warmup=True, verbose=False) diff --git a/demo/Diffusion/inpaint_pipeline.py b/demo/Diffusion/inpaint_pipeline.py index 3931609f..3a1ade5a 100755 --- a/demo/Diffusion/inpaint_pipeline.py +++ b/demo/Diffusion/inpaint_pipeline.py @@ -42,7 +42,7 @@ def __init__( if scheduler != "PNDM": raise ValueError(f"Inpainting only supports PNDM scheduler") - + super(InpaintPipeline, self).__init__(*args, **kwargs, \ inpaint=True, scheduler=scheduler, stages=[ 'vae_encoder', 'clip', 'unet', 'vae']) @@ -132,3 +132,4 @@ def infer( if not warmup: self.print_summary(self.denoising_steps, e2e_tic, e2e_toc, vae_enc=True) self.save_image(images, 'inpaint', prompt) + diff --git a/demo/Diffusion/models.py b/demo/Diffusion/models.py index 3a5c4947..bcf69b32 100644 --- a/demo/Diffusion/models.py +++ b/demo/Diffusion/models.py @@ -383,7 +383,7 @@ def __init__(self, token, device, path): super().__init__() self.path = path self.vae_encoder = AutoencoderKL.from_pretrained(self.path, subfolder="vae", 
use_auth_token=token).to(device) - + def forward(self, x): return self.vae_encoder.encode(x).latent_dist.sample() diff --git a/demo/Diffusion/stable_diffusion_pipeline.py b/demo/Diffusion/stable_diffusion_pipeline.py index 9fd77fa2..7632995a 100755 --- a/demo/Diffusion/stable_diffusion_pipeline.py +++ b/demo/Diffusion/stable_diffusion_pipeline.py @@ -24,7 +24,7 @@ import onnx from polygraphy import cuda import torch -from utilities import Engine, device_view, save_image +from utilities import Engine, save_image from utilities import DPMScheduler, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler class StableDiffusionPipeline: @@ -45,6 +45,7 @@ def __init__( hf_token=None, verbose=False, nvtx_profile=False, + use_cuda_graph=False, ): """ Initializes the Diffusion pipeline. @@ -76,6 +77,8 @@ def __init__( Enable verbose logging. nvtx_profile (bool): Insert NVTX profiling markers. + use_cuda_graph (bool): + Use CUDA graph to capture engine execution and then launch inference """ self.denoising_steps = denoising_steps @@ -125,11 +128,15 @@ def __init__( self.stages = stages self.inpaint = inpaint + self.use_cuda_graph = use_cuda_graph - self.stream = None # loaded in loadResources() - self.tokenizer = None # loaded in loadResources() - self.models = {} # loaded in loadEngines() - self.engine = {} # loaded in loadEngines() + # initialized in loadResources() + self.stream = None + self.tokenizer = None + # initialized in loadEngines() + self.models = {} + self.engine = {} + self.shared_device_memory = None def loadResources(self, image_height, image_width, batch_size, seed): # Initialize noise generator @@ -157,6 +164,9 @@ def teardown(self): for engine in self.engine.values(): del engine + if self.shared_device_memory: + self.shared_device_memory.free() + self.stream.free() del self.stream @@ -301,18 +311,23 @@ def loadEngines( self.engine[model_name] = engine # Load and activate TensorRT engines + max_device_memory = 0 for model_name, obj in self.models.items(): engine = self.engine[model_name] engine.load() + max_device_memory = max(max_device_memory, engine.engine.device_memory_size) if onnx_refit_dir: onnx_refit_path = self.getOnnxPath(model_name, onnx_refit_dir) if os.path.exists(onnx_refit_path): engine.refit(onnx_opt_path, onnx_refit_path) - engine.activate() + + self.shared_device_memory = cuda.DeviceArray.raw((max_device_memory,)) + for engine in self.engine.values(): + engine.activate(reuse_device_memory=self.shared_device_memory.ptr) def runEngine(self, model_name, feed_dict): engine = self.engine[model_name] - return engine.infer(feed_dict, self.stream) + return engine.infer(feed_dict, self.stream, use_cuda_graph=self.use_cuda_graph) def initialize_latents(self, batch_size, unet_channels, latent_height, latent_width): latents_dtype = torch.float32 # text_embeddings.dtype @@ -357,7 +372,7 @@ def encode_prompt(self, prompt, negative_prompt): return_tensors="pt", ).input_ids.type(torch.int32).to(self.device) - text_input_ids_inp = device_view(text_input_ids) + text_input_ids_inp = text_input_ids # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt text_embeddings = self.runEngine('clip', {"input_ids": text_input_ids_inp})['text_embeddings'].clone() @@ -369,7 +384,7 @@ def encode_prompt(self, prompt, negative_prompt): truncation=True, return_tensors="pt", ).input_ids.type(torch.int32).to(self.device) - uncond_input_ids_inp = device_view(uncond_input_ids) + uncond_input_ids_inp = 
uncond_input_ids uncond_embeddings = self.runEngine('clip', {"input_ids": uncond_input_ids_inp})['text_embeddings'] # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance @@ -404,9 +419,9 @@ def denoise_latent(self, latents, text_embeddings, timesteps=None, step_offset=0 embeddings_dtype = np.float16 timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep - sample_inp = device_view(latent_model_input) - timestep_inp = device_view(timestep_float) - embeddings_inp = device_view(text_embeddings) + sample_inp = latent_model_input + timestep_inp = timestep_float + embeddings_inp = text_embeddings noise_pred = self.runEngine('unet', {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp})['latent'] if self.nvtx_profile: nvtx.end_range(nvtx_unet) @@ -431,7 +446,7 @@ def encode_image(self, init_image): if self.nvtx_profile: nvtx_vae = nvtx.start_range(message='vae_encoder', color='red') cudart.cudaEventRecord(self.events['vae_encoder-start'], 0) - init_latents = self.runEngine('vae_encoder', {"images": device_view(init_image)})['latent'] + init_latents = self.runEngine('vae_encoder', {"images": init_image})['latent'] cudart.cudaEventRecord(self.events['vae_encoder-stop'], 0) if self.nvtx_profile: nvtx.end_range(nvtx_vae) @@ -443,7 +458,7 @@ def decode_latent(self, latents): if self.nvtx_profile: nvtx_vae = nvtx.start_range(message='vae', color='red') cudart.cudaEventRecord(self.events['vae-start'], 0) - images = self.runEngine('vae', {"latent": device_view(latents)})['images'] + images = self.runEngine('vae', {"latent": latents})['images'] cudart.cudaEventRecord(self.events['vae-stop'], 0) if self.nvtx_profile: nvtx.end_range(nvtx_vae) diff --git a/demo/Diffusion/utilities.py b/demo/Diffusion/utilities.py index cdccbf7a..59cea047 100644 --- a/demo/Diffusion/utilities.py +++ b/demo/Diffusion/utilities.py @@ -35,6 +35,7 @@ import torch import requests from io import BytesIO +from cuda import cudart TRT_LOGGER = trt.Logger(trt.Logger.ERROR) @@ -59,9 +60,13 @@ # Map of torch dtype -> numpy dtype torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()} -def device_view(t): - return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype]) - +def CUASSERT(cuda_ret): + err = cuda_ret[0] + if err != cudart.cudaError_t.cudaSuccess: + raise RuntimeError(f"CUDA ERROR: {err}, error code reference: https://nvidia.github.io/cuda-python/module/cudart.html#cuda.cudart.cudaError_t") + if len(cuda_ret) > 1: + return cuda_ret[1] + return None class Engine(): def __init__( @@ -73,6 +78,7 @@ def __init__( self.context = None self.buffers = OrderedDict() self.tensors = OrderedDict() + self.cuda_graph_instance = None # cuda graph def __del__(self): [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray) ] @@ -219,8 +225,12 @@ def load(self): print(f"Loading TensorRT engine: {self.engine_path}") self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) - def activate(self): - self.context = self.engine.create_execution_context() + def activate(self, reuse_device_memory=None): + if reuse_device_memory: + self.context = self.engine.create_execution_context_without_device_memory() + self.context.device_memory = reuse_device_memory + else: + self.context = self.engine.create_execution_context() def allocate_buffers(self, shape_dict=None, device='cuda'): for idx in 
range(trt_util.get_bindings_per_profile(self.engine)): @@ -234,19 +244,32 @@ def allocate_buffers(self, shape_dict=None, device='cuda'): self.context.set_binding_shape(idx, shape) tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) self.tensors[binding] = tensor - self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype) - def infer(self, feed_dict, stream): - start_binding, end_binding = trt_util.get_active_profile_bindings(self.context) - # shallow copy of ordered dict - device_buffers = copy(self.buffers) + def infer(self, feed_dict, stream, use_cuda_graph=False): for name, buf in feed_dict.items(): - assert isinstance(buf, cuda.DeviceView) - device_buffers[name] = buf - bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()] - noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr) - if not noerror: - raise ValueError(f"ERROR: inference failed.") + self.tensors[name].copy_(buf) + + for name, tensor in self.tensors.items(): + self.context.set_tensor_address(name, tensor.data_ptr()) + + if use_cuda_graph: + if self.cuda_graph_instance is not None: + CUASSERT(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream.ptr)) + CUASSERT(cudart.cudaStreamSynchronize(stream.ptr)) + else: + # do inference before CUDA graph capture + noerror = self.context.execute_async_v3(stream.ptr) + if not noerror: + raise ValueError(f"ERROR: inference failed.") + # capture cuda graph + CUASSERT(cudart.cudaStreamBeginCapture(stream.ptr, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)) + self.context.execute_async_v3(stream.ptr) + self.graph = CUASSERT(cudart.cudaStreamEndCapture(stream.ptr)) + self.cuda_graph_instance = CUASSERT(cudart.cudaGraphInstantiate(self.graph, 0)) + else: + noerror = self.context.execute_async_v3(stream.ptr) + if not noerror: + raise ValueError(f"ERROR: inference failed.") return self.tensors @@ -1186,8 +1209,11 @@ def add_arguments(parser): parser.add_argument('--num-warmup-runs', type=int, default=5, help="Number of warmup runs before benchmarking performance") parser.add_argument('--nvtx-profile', action='store_true', help="Enable NVTX markers for performance profiling") parser.add_argument('--seed', type=int, default=None, help="Seed for random generator to get consistent results") + parser.add_argument('--use-cuda-graph', action='store_true', help="Enable cuda graph") parser.add_argument('--output-dir', default='output', help="Output directory for logs and image artifacts") parser.add_argument('--hf-token', type=str, help="HuggingFace API access token for downloading model checkpoints") parser.add_argument('-v', '--verbose', action='store_true', help="Show verbose output") return parser + + diff --git a/demo/EfficientDet/notebooks/EfficientDet-TensorRT8.ipynb b/demo/EfficientDet/notebooks/EfficientDet-TensorRT8.ipynb index 5eba10cb..a38ef2c4 100644 --- a/demo/EfficientDet/notebooks/EfficientDet-TensorRT8.ipynb +++ b/demo/EfficientDet/notebooks/EfficientDet-TensorRT8.ipynb @@ -347,28 +347,29 @@ "\n", "To generate an ONNX model file, first find the input shape that corresponds to the model you're converting:\n", "\n", - "| **Model** | **Input Shape** |\n", - "| -----------------|-----------------|\n", - "| EfficientDet D0 | N,512,512,3 |\n", - "| EfficientDet D1 | N,640,640,3 |\n", - "| EfficientDet D2 | N,768,768,3 |\n", - "| EfficientDet D3 | N,896,896,3 |\n", - "| EfficientDet D4 | N,1024,1024,3 |\n", - "| EfficientDet D5 | N,1280,1280,3 |\n", - "| 
EfficientDet D6 | N,1280,1280,3 |\n", - "| EfficientDet D7 | N,1536,1536,3 |\n", - "| EfficientDet D7x | N,1536,1536,3 |\n", - "\n", - "\n", - "Where **N** is the batch size you would like to run inference at, such as `8,512,512,3` for a batch size of 8.\n", - "\n", - "The conversion process supports both NHWC and NCHW input formats, so if your input source is an `NCHW` data format, you can use the corresponding input shape, i.e. `1,512,512,3` -> `1,3,512,512`.\n", + "| **Model** | **Input Size** |\n", + "| --------------------|----------------|\n", + "| efficientdet-d0 | 512,512 |\n", + "| efficientdet-d1 | 640,640 |\n", + "| efficientdet-d2 | 768,768 |\n", + "| efficientdet-d3 | 896,896 |\n", + "| efficientdet-d4 | 1024,1024 |\n", + "| efficientdet-d5 | 1280,1280 |\n", + "| efficientdet-d6 | 1280,1280 |\n", + "| efficientdet-d7 | 1536,1536 |\n", + "| efficientdet-d7x | 1536,1536 |\n", + "| efficientdet-lite0 | 320,320 |\n", + "| efficientdet-lite1 | 384,384 |\n", + "| efficientdet-lite2 | 448,448 |\n", + "| efficientdet-lite3 | 512,512 |\n", + "| efficientdet-lite3x | 640,640 |\n", + "| efficientdet-lite4 | 640,640 |\n", "\n", "To create the ONNX graph, execute efficientdet/create_onnx.py script which takes the following arguments:\n", "```\n", "* --saved_model /path/to/tf_model \n", "* --onnx /path/to/onnx.model\n", - "* --input_shape One of the input shapes corresponding to the model mentioned previously\n", + "* --input_size One of the input shapes corresponding to the model mentioned previously\n", "```" ] }, @@ -388,7 +389,7 @@ "!python3 $TRT_OSSPATH/samples/python/efficientdet/create_onnx.py \\\n", " --saved_model ./tf_model/ \\\n", " --onnx ./onnx_model/model.onnx \\\n", - " --input_shape '1,512,512,3'" + " --input_size '512,512'" ] }, { @@ -403,7 +404,7 @@ "The script has a few additional arguments:\n", "\n", "* `--nms_threshold` allows overriding the NMS score threshold value. The runtime latency of the EfficientNMS plugin is sensitive to the score threshold used, so it's a good practice to set this value as high as possible, while still fulfilling your application requirements, to reduce latency as much as possible.\n", - "* `--legacy_plugins` allows falling back to older plugins on systems where a version lower than TensorRT 8.0.1 is installed. This will result in substantially slower inference times however.\n" + "* `--preprocessor [imagenet,scale_range]` allows switching between two possible image preprocessing methods. Most EfficientDet models use the `imagenet` method, which this argument defaults to, and corresponds to standard ImageNet mean subtraction and standard deviation normalization. The `scale_range` method instead normalizes the image to a range of [-1,+1]. Please use this method only when converting the **AdvProp** pre-trained checkpoints, as they were created with this preprocessor operation.\n" ] }, { diff --git a/demo/HuggingFace/BART/BARTModelConfig.py b/demo/HuggingFace/BART/BARTModelConfig.py index 8994e7e5..f8ea3bd7 100755 --- a/demo/HuggingFace/BART/BARTModelConfig.py +++ b/demo/HuggingFace/BART/BARTModelConfig.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
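Stepping back to the CUDA-graph path added to `demo/Diffusion/utilities.py` earlier in this patch: the capture/instantiate/launch sequence there follows the standard cudart pattern. The self-contained sketch below reproduces that pattern with a plain device-to-device copy standing in for engine execution; the buffer size and warm-up launch are illustrative, and the `cuassert` helper mirrors the demo's `CUASSERT`.

```python
from cuda import cudart

def cuassert(ret):
    """Unpack a cudart return tuple, raising on error (mirrors the demo's CUASSERT)."""
    err, *rest = ret
    if err != cudart.cudaError_t.cudaSuccess:
        raise RuntimeError(f"CUDA error: {err}")
    return rest[0] if rest else None

nbytes = 4 * 1024 * 1024
src = cuassert(cudart.cudaMalloc(nbytes))
dst = cuassert(cudart.cudaMalloc(nbytes))
stream = cuassert(cudart.cudaStreamCreate())
copy_kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

# Warm-up launch outside capture, like the demo's single inference before
# capturing, so lazy initialization does not get recorded into the graph.
cuassert(cudart.cudaMemcpyAsync(dst, src, nbytes, copy_kind, stream))

# Record the same work into a graph, instantiate it once, then replay it cheaply.
cuassert(cudart.cudaStreamBeginCapture(stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal))
cuassert(cudart.cudaMemcpyAsync(dst, src, nbytes, copy_kind, stream))
graph = cuassert(cudart.cudaStreamEndCapture(stream))
graph_exec = cuassert(cudart.cudaGraphInstantiate(graph, 0))

for _ in range(10):  # replays skip per-launch CPU overhead; pointers and shapes must not change
    cuassert(cudart.cudaGraphLaunch(graph_exec, stream))
cuassert(cudart.cudaStreamSynchronize(stream))
```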
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -98,8 +98,8 @@ class BARTModelTRTConfig(NNConfig): TARGET_MODELS = ["facebook/bart-base", "facebook/bart-large", "facebook/bart-large-cnn", "facebook/mbart-large-50"] MAX_DECODER_WORKSPACE_MB = { - TARGET_MODELS[0]: 3072, - TARGET_MODELS[1]: 3072, + TARGET_MODELS[0]: 3072, + TARGET_MODELS[1]: 3072, TARGET_MODELS[2]: 3072, TARGET_MODELS[3]: 3072, } @@ -108,23 +108,23 @@ class BARTModelTRTConfig(NNConfig): # bart-large: 24-layer, 1024-hidden, 406M parameters # in all bart variants, # of encoder layers and # of decoder layers are the same NUMBER_OF_LAYERS = { - TARGET_MODELS[0]: 12, - TARGET_MODELS[1]: 24, + TARGET_MODELS[0]: 12, + TARGET_MODELS[1]: 24, TARGET_MODELS[2]: 24, TARGET_MODELS[3]: 24, - } - + } + NUMBER_OF_DECODER_LAYERS = { - TARGET_MODELS[0]: 6, - TARGET_MODELS[1]: 12, + TARGET_MODELS[0]: 6, + TARGET_MODELS[1]: 12, TARGET_MODELS[2]: 12, TARGET_MODELS[3]: 12, - } - + } + # in all bart variants, # of heads in encoder and decoder are the same NUMBER_OF_HEADS = { - TARGET_MODELS[0]: 12, - TARGET_MODELS[1]: 16, + TARGET_MODELS[0]: 12, + TARGET_MODELS[1]: 16, TARGET_MODELS[2]: 16, TARGET_MODELS[3]: 16, } @@ -142,7 +142,7 @@ class BARTModelTRTConfig(NNConfig): TARGET_MODELS[1]: 1024, TARGET_MODELS[2]: 1024, TARGET_MODELS[3]: 1024, - } + } # To achieve identical results with original HuggingFace implementation, the min_length in model config should be consistent with each model variant # see task-specific params in config.json of each variant model @@ -151,7 +151,7 @@ class BARTModelTRTConfig(NNConfig): TARGET_MODELS[1]: 0, TARGET_MODELS[2]: 56, TARGET_MODELS[3]: 0, - } + } #TODO: this might better be an inference time input like the `max_length` arg in generate() and greedy_search(). The change needed is in NNDF/interface.py:__call__ so it's a fundamental change affecting GPT2 and T5 code. Here I just put this option in BART model config for now. But it's also reasonable to treat this as a model config, because the TRT engine building may need this to have fixed dimension (e.g., to enable KV-cache) # see task-specific params in config.json of each variant model @@ -160,9 +160,9 @@ class BARTModelTRTConfig(NNConfig): TARGET_MODELS[1]: 1024, TARGET_MODELS[2]: 142, TARGET_MODELS[3]: 200, - } + } - # BART specific configs: https://huggingface.co/facebook/bart-base/blob/main/config.json + # BART specific configs: https://huggingface.co/facebook/bart-base/blob/main/config.json NO_REPEAT_NGRAM_SIZE = 3 BOS_TOKEN_ID = 0 EOS_TOKEN_ID = 2 @@ -178,7 +178,7 @@ class BARTModelTRTConfig(NNConfig): NETWORK_DECODER_SEGMENT_NAME = "decoder" NETWORK_ENCODER_SEGMENT_NAME = "encoder" NETWORK_SEGMENTS = [NETWORK_DECODER_SEGMENT_NAME, NETWORK_ENCODER_SEGMENT_NAME] - + def __init__(self): precision_fp16 = [False, True] kv_caches = [False, True] @@ -214,7 +214,7 @@ def get_metadata_string(self, metadata: NetworkMetadata) -> str: # Remove redundant bart name prefix if "mbart" in metadata.variant: metadata = metadata._replace(variant=metadata.variant.replace("facebook/mbart-","mbart-")) - else: + else: metadata = metadata._replace(variant=metadata.variant.replace("facebook/bart-","")) return super().get_metadata_string(metadata) @@ -240,13 +240,13 @@ def get_input_dims(metadata) -> Dict: if metadata.other.kv_cache: # for KV cache version, we need add per-layer KV cache inputs. 
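To make the per-layer cache layout described in the surrounding comments concrete, here is a small illustrative sketch of the four tensors one decoder layer contributes. The head count and per-head size are assumptions in the spirit of bart-base (12 heads, 64 = 768/12), not values read from the model config.

```python
import torch

batch, num_heads, emb_per_head = 1, 12, 64   # assumed, bart-base-like values
past_decoder_length = 7                      # grows by one every decoding step
encoder_length = 42                          # fixed for the whole generation

# Decoder self-attention cache: the sequence dim varies per step.
self_attn_key = torch.zeros(batch, num_heads, past_decoder_length, emb_per_head)
self_attn_value = torch.zeros_like(self_attn_key)

# Encoder-decoder cross-attention cache: the sequence dim stays at encoder_length.
cross_attn_key = torch.zeros(batch, num_heads, encoder_length, emb_per_head)
cross_attn_value = torch.zeros_like(cross_attn_key)

# One layer's past_key_values entry, in the order the surrounding comments list:
# (self-attention K, self-attention V, cross-attention K, cross-attention V)
layer_cache = (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)
```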
`past_key_values` at each layer is (self-attention K, self-attention V, cross-attention K, cross-attention V) for i in range(BARTModelTRTConfig.NUMBER_OF_DECODER_LAYERS[metadata.variant]): - # decoder self-attention KV cache (dim[0] & dim[2] are dynamic, and dim[2] varies at each decoding timestep) + # decoder self-attention KV cache (dim[0] & dim[2] are dynamic, and dim[2] varies at each decoding timestep) self_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("past_decoder_length"), "embedding_size_per_head") decoder_inputs_dict[f"past_key_values.{i}.decoder.key"] = self_attention_past_kv_dims decoder_inputs_dict[f"past_key_values.{i}.decoder.value"] = self_attention_past_kv_dims - + # encoder-decoder cross-attention KV cache (dim[0] & dim[2] are dynamic, but dim[2] is constant at each decoding timestep) - cross_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") + cross_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") decoder_inputs_dict[f"past_key_values.{i}.encoder.key"] = cross_attention_past_kv_dims decoder_inputs_dict[f"past_key_values.{i}.encoder.value"] = cross_attention_past_kv_dims @@ -273,16 +273,16 @@ def get_output_dims(metadata) -> Dict: if metadata.other.kv_cache: # for KV cache version, we need add per-layer KV cache inputs. `past_key_values` at each layer is (self-attention K, self-attention V, cross-attention K, cross-attention V) - + # for all BART variants, # encoder layers = # decoder layers, so just divide total # layers by 2 for i in range(BARTModelTRTConfig.NUMBER_OF_DECODER_LAYERS[metadata.variant]): - # decoder self-attention KV cache (dim[0] & dim[2] are dynamic, and dim[2] varies at each decoding timestep) + # decoder self-attention KV cache (dim[0] & dim[2] are dynamic, and dim[2] varies at each decoding timestep) self_attention_present_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("decoder_length"), "embedding_size_per_head") decoder_outputs_dict[f"present_key_values.{i}.decoder.key"] = self_attention_present_kv_dims decoder_outputs_dict[f"present_key_values.{i}.decoder.value"] = self_attention_present_kv_dims - + # encoder-decoder cross-attention KV cache (dim[0] & dim[2] are dynamic, but dim[2] is constant at each decoding timestep) - cross_attention_present_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") + cross_attention_present_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") decoder_outputs_dict[f"present_key_values.{i}.encoder.key"] = cross_attention_present_kv_dims decoder_outputs_dict[f"present_key_values.{i}.encoder.value"] = cross_attention_present_kv_dims diff --git a/demo/HuggingFace/BART/checkpoint.toml b/demo/HuggingFace/BART/checkpoint.toml index d7ddcafd..52add215 100755 --- a/demo/HuggingFace/BART/checkpoint.toml +++ b/demo/HuggingFace/BART/checkpoint.toml @@ -9,9 +9,6 @@ label = "NVIDIA TensorRT-based applications perform up to 36X faster than CPU-on [BART.all."facebook/bart-large".all.summarization] -[BART.all."facebook/mbart-large-50".all.summarization] - -label = "NVIDIA TensorRT-based applications perform up to 36X faster than CPU-only platforms during inference, enabling developers to optimize neural network models trained on all major frameworks, calibrate for lower precision with high accuracy, and deploy to 
hyperscale data centers, embedded platforms, or automotive product platforms. TensorTM, built on the NVIDIA CUDA parallel programming model, enables developers of applications to optimise inference by leveraging libraries, development tools, and technologies in CUDA-X for AI, autonomous machines, high performance computing, and graphics. With new NVIDIA Ampere Architecture GPUs, Tensor RT also uses sparse tensor cores for an additional performance boost." label = "NVIDIA TensorRT-based applications perform up to 36X faster than CPU-only platforms during inference, enabling developers to optimize neural network models trained on all major frameworks, calibrate for lower precision with high accuracy, and deploy to hyperscale data centers, embedded platforms, or automotive product platforms. Tensor RT is the first GPU-based inference platform to use NVIDIA's CUDA-X architecture. TenseRT, built on the NVIDIA CUDA parallel programming model, enables developers to analyze neural network data and perform inference by leveraging libraries, development tools, and technologies in CUDA, including CUDA for AI, autonomous machines, high performance computing, and graphics. With new NVIDIA Ampere Architecture GPUs, TensorRex also uses sparse tensor cores for an additional performance boost." [BART.all."facebook/bart-large-cnn".all.summarization] diff --git a/demo/HuggingFace/BART/export.py b/demo/HuggingFace/BART/export.py index faa8b945..f3730178 100755 --- a/demo/HuggingFace/BART/export.py +++ b/demo/HuggingFace/BART/export.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,7 +41,7 @@ # TRT-HuggingFace from BART.BARTModelConfig import BARTModelTRTConfig -from NNDF.tensorrt_utils import clamp_weights_onnx_to_fp16_bounds, move_t5_cast_op +from NNDF.tensorrt_utils import OnnxProcessOperation, process_onnx from NNDF.networks import NetworkMetadata, Precision, Dims from NNDF.logger import G_LOGGER from NNDF.models import ( @@ -354,13 +354,14 @@ def _export_forward(input_ids, encoder_hidden_states, use_cache): if network_metadata.precision.fp16: G_LOGGER.debug("Clamping FP16 weights for BART") - # move_t5_cast_op(output_fpath, output_fpath) # BART doesn't have T5's Add-Cast-Pow ordering issue + # BART doesn't have T5's Add-Cast-Pow ordering issue if network_metadata.other.kv_cache: # both onnx files need clamp - clamp_weights_onnx_to_fp16_bounds(non_kv_fpath, non_kv_fpath) - clamp_weights_onnx_to_fp16_bounds(kv_fpath, kv_fpath) + process_onnx([OnnxProcessOperation.CLAMP_WEIGHTS], kv_fpath, kv_fpath) + process_onnx([OnnxProcessOperation.CLAMP_WEIGHTS], non_kv_fpath, non_kv_fpath) + else: - clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath) + process_onnx([OnnxProcessOperation.CLAMP_WEIGHTS], output_fpath, output_fpath) return BARTDecoderONNXFile(output_fpath, network_metadata) @@ -412,7 +413,7 @@ def torch_to_onnx( if network_metadata.precision.fp16: G_LOGGER.debug("Clamping FP16 weights for BART") - # move_t5_cast_op(output_fpath, output_fpath) # BART doesn't have T5's Add-Cast-Pow ordering issue - clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath) + # BART doesn't have T5's Add-Cast-Pow ordering issue + process_onnx([OnnxProcessOperation.CLAMP_WEIGHTS], output_fpath, output_fpath) return 
BARTEncoderONNXFile(output_fpath, network_metadata) diff --git a/demo/HuggingFace/BART/frameworks.py b/demo/HuggingFace/BART/frameworks.py index 69fecb72..3df3e908 100644 --- a/demo/HuggingFace/BART/frameworks.py +++ b/demo/HuggingFace/BART/frameworks.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -68,16 +68,16 @@ def __init__(self): def generate_and_download_framework( self, metadata: NetworkMetadata, workspace: NNFolderWorkspace ) -> NetworkModels: - + cache_variant = False if metadata.other.kv_cache: cache_variant = True - + trt_BART_config = self.config metadata_serialized = trt_BART_config.get_metadata_string(metadata) workspace_dir, encoder_onnx_root, decoder_onnx_root = workspace.set_model_path(metadata_serialized, is_encoder_decoder = True) pytorch_model_dir = os.path.join(workspace_dir, "pytorch_model") - + # We keep track of the generated torch location for cleanup later self.torch_BART_dir = pytorch_model_dir @@ -111,8 +111,8 @@ def generate_and_download_framework( model = MBartForConditionalGeneration.from_pretrained(pytorch_model_dir) model.config.use_cache = cache_variant # somehow the use_cache config automatically set to True even though specified in tfm_config before. Force change - - # These ONNX models can be converted using special encoder and decoder classes. + + # These ONNX models can be converted using special encoder and decoder classes. encoder_onnx_model_fpath = os.path.join(encoder_onnx_root, metadata_serialized + "-encoder.onnx") decoder_onnx_model_fpath = os.path.join(decoder_onnx_root, metadata_serialized + "-decoder-with-lm-head.onnx") @@ -262,7 +262,7 @@ def execute_inference( batch_size=batch_size, use_cache=metadata.other.kv_cache, ) - + # Prepare runtime results. runtime=[ NetworkRuntime( @@ -370,4 +370,4 @@ def run_framework( if __name__ == "__main__": result = RUN_CMD() - print("Results: {}".format(result)) \ No newline at end of file + print("Results: {}".format(result)) diff --git a/demo/HuggingFace/BART/hf.py b/demo/HuggingFace/BART/hf.py index 0e8dc48b..ae79b64c 100755 --- a/demo/HuggingFace/BART/hf.py +++ b/demo/HuggingFace/BART/hf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ Usage: python3 hf.py --variant facebook/bart-base [--enable-kv-cache] [--fp16] """ -import time +import time from transformers import BartTokenizer, BartForConditionalGeneration import argparse @@ -63,6 +63,6 @@ output = tokenizer.decode(summary_ids[-1,:], skip_special_tokens=True) -print('BART output: ', output) +print('BART output: ', output) print(f"Input sequence length: {input_ids.size(1)}, Output sequence length: {summary_ids[-1,:].size(0)}") print("Average run time: {:.2f} ms".format((end - start)/trials*1000)) diff --git a/demo/HuggingFace/BART/measurements.py b/demo/HuggingFace/BART/measurements.py index 06dd7c1b..54f809b0 100644 --- a/demo/HuggingFace/BART/measurements.py +++ b/demo/HuggingFace/BART/measurements.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -96,7 +96,7 @@ def full_inference_greedy( stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length)]) no_repeat_ngram_size = BARTModelTRTConfig.NO_REPEAT_NGRAM_SIZE logits_processor = LogitsProcessorList([ - NoRepeatNGramLogitsProcessor(no_repeat_ngram_size), + NoRepeatNGramLogitsProcessor(no_repeat_ngram_size), MinLengthLogitsProcessor(min_length, tokenizer.convert_tokens_to_ids(tokenizer.eos_token)), ForcedBOSTokenLogitsProcessor(tokenizer.convert_tokens_to_ids(tokenizer.bos_token)), ForcedEOSTokenLogitsProcessor(max_length, tokenizer.convert_tokens_to_ids(tokenizer.eos_token)) @@ -167,7 +167,7 @@ def full_inference_beam( stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length)]) no_repeat_ngram_size = BARTModelTRTConfig.NO_REPEAT_NGRAM_SIZE logits_processor = LogitsProcessorList([ - NoRepeatNGramLogitsProcessor(no_repeat_ngram_size), + NoRepeatNGramLogitsProcessor(no_repeat_ngram_size), MinLengthLogitsProcessor(min_length, tokenizer.convert_tokens_to_ids(tokenizer.eos_token)), ForcedBOSTokenLogitsProcessor(tokenizer.convert_tokens_to_ids(tokenizer.bos_token)), ForcedEOSTokenLogitsProcessor(max_length, tokenizer.convert_tokens_to_ids(tokenizer.eos_token)) @@ -194,7 +194,7 @@ def _e2e(): ) encoder_last_hidden_state = BART_encoder(input_ids=input_ids) - + encoder_last_hidden_state = expand_inputs_for_beam_search(encoder_last_hidden_state, expand_size=num_beams) decoder_output_beam = BART_decoder.beam_search( @@ -219,9 +219,9 @@ def _e2e_trt(): ) encoder_last_hidden_state = BART_encoder(input_ids=input_ids) - + encoder_last_hidden_state = expand_inputs_for_beam_search(encoder_last_hidden_state, expand_size=num_beams) - + BART_decoder.set_encoder_hidden_states_for_inference_cycle(encoder_last_hidden_state) decoder_output_beam = BART_decoder.beam_search( input_ids=decoder_input_ids, diff --git a/demo/HuggingFace/BART/onnxrt.py b/demo/HuggingFace/BART/onnxrt.py index 0d8cbed2..b7523e0d 100644 --- a/demo/HuggingFace/BART/onnxrt.py +++ b/demo/HuggingFace/BART/onnxrt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -125,7 +125,7 @@ def execute_inference( benchmarking_mode: bool = False, benchmarking_args: BARTBenchmarkingArgs = None, ) -> NetworkResult: - + if "mbart" not in metadata.variant: tokenizer = BartTokenizer.from_pretrained(metadata.variant) else: @@ -207,7 +207,7 @@ def execute_inference( onnx=list(onnx_fpaths.values()), trt=None ) - + # Skip result checking in benchmarking mode since the input data is random. if benchmarking_mode: return BenchmarkingResult(median_runtime=runtime, models=models) diff --git a/demo/HuggingFace/BART/trt.py b/demo/HuggingFace/BART/trt.py index b8be8d81..85bb2790 100644 --- a/demo/HuggingFace/BART/trt.py +++ b/demo/HuggingFace/BART/trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -278,7 +278,7 @@ def __init__( # non kv-cache mode: False. Then in forward(), trt_context and bindings are set to the default ones # kv-cache mode: True. By default 1st decoding step starts with non-kv engine's context and binding; then flag gets updated in prepare_inputs_for_generation() - self.return_device = "cuda" + self.return_device = torch.device('cuda') self.variant = network_metadata.variant # record variant name to later index the vocab_size in forward() @@ -806,14 +806,15 @@ def execute_calculate_perplexity( metadata: NetworkMetadata, encoder_input: str, decoder_input: str, + batch_size: int, ): if "mbart" not in metadata.variant: tokenizer = BartTokenizer.from_pretrained(metadata.variant) else: tokenizer = MBart50Tokenizer.from_pretrained(metadata.variant, src_lang="en_XX") - encoder_input_ids = tokenizer([encoder_input], padding=True, return_tensors="pt").input_ids - decoder_input_ids = tokenizer([decoder_input], padding=True, return_tensors="pt").input_ids + encoder_input_ids = tokenizer([encoder_input] * batch_size, padding=True, return_tensors="pt").input_ids + decoder_input_ids = tokenizer([decoder_input] * batch_size, padding=True, return_tensors="pt").input_ids perplexity = calculate_perplexity( self.BART_trt_encoder, self.BART_trt_decoder, tokenizer, encoder_input_ids, decoder_input_ids, @@ -821,21 +822,6 @@ def execute_calculate_perplexity( ) return perplexity - def _setup_workspace(self, metadata: NetworkMetadata, working_directory: str) -> NNFolderWorkspace: - return NNFolderWorkspace( - self.frameworks_cmd.config.network_name, metadata, working_directory - ) - - def _download_models( - self, - workspace: NNFolderWorkspace, - metadata: NetworkMetadata, - ) -> Tuple[NetworkModel]: - # No fpath provided for onnx files, download them from HuggingFace repo. 
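The `batch_size` parameter added to `execute_calculate_perplexity` above simply replicates the same text across the batch dimension before tokenizing. A minimal standalone illustration, where the model name comes from the demo's target list and the text is a placeholder:

```python
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
batch_size = 4
text = "TensorRT accelerates transformer inference."

# Repeating the string batch_size times yields input_ids of shape
# (batch_size, seq_len), matching the batched engine profiles used in this demo.
input_ids = tokenizer([text] * batch_size, padding=True, return_tensors="pt").input_ids
print(input_ids.shape)  # e.g. torch.Size([4, 10])
```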
- return self.frameworks_cmd.generate_and_download_framework( - metadata, workspace - ).onnx - def _setup_engines( self, metadata: NetworkMetadata, @@ -967,7 +953,7 @@ def _setup_engines( if num_beams > 1: engine_tag += "-beam{}".format(num_beams) - preview_features = [] + preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805] if disable_preview_dynamic_shapes: engine_tag += "-noPreviewFasterDynamicShapes" else: @@ -1080,7 +1066,7 @@ def run_trt( else: for ei, di in zip(network_input, perplexity_reference): ppl_results.append( - self.execute_calculate_perplexity(metadata, ei, di) + self.execute_calculate_perplexity(metadata, ei, di, batch_size) ) else: @@ -1106,7 +1092,7 @@ def run_trt( assert benchmarking_args.output_seq_len <= benchmarking_args.output_profile_max_len, "output_seq_len should <= output_profile_max_len = {} for benchmarking mode".format(benchmarking_args.output_profile_max_len) assert benchmarking_args.input_profile_max_len <= max_input_seq_len, "Model config restrict input_profile_max_len <= {} for benchmark mode".format(max_input_seq_len) assert benchmarking_args.output_profile_max_len <= max_output_seq_len, "Model config restrict output_profile_max_len <= {} for benchmark mode".format(max_output_seq_len) - + self._setup_engines(metadata, hash_onnx_fpath, batch_size, args.num_beams, disable_preview_dynamic_shapes, benchmarking_args, seq_tag) inference_results = self.execute_inference( metadata, hash_onnx_fpath, None, timing_profile, batch_size, args.num_beams, True, benchmarking_args diff --git a/demo/HuggingFace/GPT2/GPT2ModelConfig.py b/demo/HuggingFace/GPT2/GPT2ModelConfig.py index 3d83024b..a0edca9f 100644 --- a/demo/HuggingFace/GPT2/GPT2ModelConfig.py +++ b/demo/HuggingFace/GPT2/GPT2ModelConfig.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -51,7 +51,7 @@ def add_args(parser: argparse.ArgumentParser) -> None: network_group.add_argument( "--num-beams", type=int, default=1, help="Enables beam search during decoding." ) - + network_group.add_argument( "--fp16", action="store_true", help="Enables fp16 TensorRT tactics." ) diff --git a/demo/HuggingFace/GPT2/export.py b/demo/HuggingFace/GPT2/export.py index 513de75b..cbd06964 100644 --- a/demo/HuggingFace/GPT2/export.py +++ b/demo/HuggingFace/GPT2/export.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
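Regarding the `preview_features` default switched to `DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805` above: the demo routes these flags through its own engine-building helpers, but on the raw TensorRT 8.5+ Python API the equivalent toggles look roughly like the sketch below (the logger severity and enabling the second feature are illustrative choices).

```python
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
config = builder.create_builder_config()

# Exclude cuDNN/cuBLAS tactics from the core library's tactic selection...
config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True)
# ...and opt in to the faster dynamic-shapes path (the demo skips this when
# --disable-preview-dynamic-shapes is passed).
config.set_preview_feature(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805, True)
```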
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,7 +60,7 @@ def __init__(self, transformer, lm_head, config): self.transformer = transformer self.lm_head = lm_head self.config = config - self.device = "cuda" # WAR to avoid beam search in framework + self.device = torch.device('cuda') # WAR to avoid beam search in framework self.main_input_name = "input_ids" # For better HuggingFace version compatibility def prepare_inputs_for_generation(self, input_ids, past = None, use_cache=None, **kwargs): @@ -143,6 +143,17 @@ def pairwise(iterable): l_next.precision = trt.float32 l_next.set_output_type(0, trt.float32) + if self.network_metadata.precision.fp16: + for i in range(network_definition[1].num_inputs): + t = network_definition[1].get_input(i) + if t.dtype == trt.float32: + t.dtype = trt.float16 + + for i in range(network_definition[1].num_outputs): + t = network_definition[1].get_output(i) + if t.dtype == trt.float32: + t.dtype = trt.float16 + return network_definition # Converters diff --git a/demo/HuggingFace/GPT2/frameworks.py b/demo/HuggingFace/GPT2/frameworks.py index f5d0abd9..d430a056 100644 --- a/demo/HuggingFace/GPT2/frameworks.py +++ b/demo/HuggingFace/GPT2/frameworks.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -148,10 +148,10 @@ def setup_tokenizer_and_model( # By default, HuggingFace model structure is one giant file. gpt2_torch_fpath = network_fpaths.torch[0].fpath gpt2_model = AutoModelForCausalLM.from_pretrained(gpt2_torch_fpath) + # Framework fp16 does not support cpu mode for GPT2 - # TODO: Enable true fp16. Using cuda 11.4 with PyTorch 1.13 will cause issue for this function. - # if metadata.precision.fp16: - # gpt2_model = gpt2_model.cuda().half() + if metadata.precision.fp16: + gpt2_model = gpt2_model.cuda().half() gpt2_torch = GPT2TorchFile.TorchModule( gpt2_model.transformer, gpt2_model.lm_head, gpt2_model.config @@ -185,9 +185,9 @@ def execute_inference( # get single decoder iteration inference timing profile _, decoder_e2e_time = gpt2_inference( - gpt2_torch, - input_ids, - timing_profile, + gpt2_torch, + input_ids, + timing_profile, use_cuda=(not use_cpu), use_cache = metadata.other.kv_cache, ) diff --git a/demo/HuggingFace/GPT2/measurements.py b/demo/HuggingFace/GPT2/measurements.py index 740b5e69..f783f872 100644 --- a/demo/HuggingFace/GPT2/measurements.py +++ b/demo/HuggingFace/GPT2/measurements.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
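The fp16 block added to GPT2's network post-processing in `export.py` above can be read in isolation as follows; `network` is assumed to be an already-populated `trt.INetworkDefinition`, so this is a sketch of the idea rather than a drop-in function from the demo.

```python
import tensorrt as trt

def mark_io_fp16(network: trt.INetworkDefinition) -> None:
    """Flip every fp32 network input/output to fp16 so the engine binds fp16 tensors directly."""
    for i in range(network.num_inputs):
        tensor = network.get_input(i)
        if tensor.dtype == trt.float32:
            tensor.dtype = trt.float16
    for i in range(network.num_outputs):
        tensor = network.get_output(i)
        if tensor.dtype == trt.float32:
            tensor.dtype = trt.float16
```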
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -64,14 +64,14 @@ def full_inference( if isinstance(gpt2, TRTNativeRunner): gpt2.set_return_device("cuda" if use_cuda else "cpu") - + def _e2e(): with torch.no_grad(): output = gpt2.generate( - input_ids, - max_length=max_length, - min_length=min_length, - batch_size=batch_size, + input_ids, + max_length=max_length, + min_length=min_length, + batch_size=batch_size, num_beams=num_beams, use_cache=use_cache, early_stopping=early_stopping diff --git a/demo/HuggingFace/GPT2/trt.py b/demo/HuggingFace/GPT2/trt.py index f215fe2a..411eb72c 100644 --- a/demo/HuggingFace/GPT2/trt.py +++ b/demo/HuggingFace/GPT2/trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -122,7 +122,7 @@ def __init__( ): super().__init__(trt_engine_file, network_metadata, hf_config, batch_size = batch_size) self.network_metadata = network_metadata - self.data_type = torch.float32 + self.data_type = torch.float32 if not network_metadata.precision.fp16 else torch.float16 # In benchmarking mode, if input_profile_max is provided, should use that as max_sequence_length if benchmarking_args is not None: if benchmarking_args.input_profile_max_len is not None: @@ -153,7 +153,7 @@ def __init__( if self.config.use_cache: self.bindings[self.trt_engine.get_binding_index("logits") + self.num_bindings] = self.logits.data_ptr() - + # Setting input and output the same does not work for GPT2. Needs separate cache and copy the memory address after each iteration self.self_attention_cache_1 = {} self.self_attention_cache_2 = {} @@ -172,9 +172,9 @@ def __init__( input_idx = self.trt_engine.get_binding_index("past_" + self_attention_name) output_idx = self.trt_engine.get_binding_index("present_" + self_attention_name) - + self.bindings[input_idx] = kv_buffer_1.data_ptr() # Generation phase - self.bindings[output_idx] = kv_buffer_2.data_ptr() + self.bindings[output_idx] = kv_buffer_2.data_ptr() # Context mode will always use buffer 1 as output self.bindings[input_idx + self.num_bindings] = 0 # Context phase, should be 0 @@ -184,17 +184,17 @@ def __init__( self.past_decoder_length = 0 self.use_cache_1_as_input = True self._set_context_mode_trt_context() - + self.context_mode = self.config.use_cache - self.return_device = "cuda" - self.device = "cuda" + self.return_device = torch.device('cuda') + self.device = torch.device('cuda') def reset(self): ''' Resets the input specific fields after finishing a task. 
''' self.context_mode = self.config.use_cache - + def _switch_input_output_binding(self): ''' For kv cache mode, switch input and output pointers to avoid data concurrency issue and D2D copy @@ -212,7 +212,7 @@ def _switch_input_output_binding(self): self.bindings[output_idx] = self.bindings[input_idx] self.bindings[input_idx] = temp self.use_cache_1_as_input = not self.use_cache_1_as_input - + def prepare_inputs_for_generation(self, input_ids, past = None, use_cache = None, **kwargs): # TODO: add position_ids, token_type_ids support if past is not None: @@ -220,7 +220,7 @@ def prepare_inputs_for_generation(self, input_ids, past = None, use_cache = None self.context_mode = False else: self.context_mode = self.config.use_cache - + return { "input_ids": input_ids, "past_key_values": past, @@ -244,7 +244,7 @@ def _reorder_cache(self, past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tenso tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past ) - + def _set_context_mode_trt_context(self): # Create TRT context for context mode (1st decoder run) with optimization profile = 1 self.context_trt_context = self.trt_engine.create_execution_context() @@ -260,7 +260,7 @@ def forward(self, input_ids, *args, **kwargs): if is_cpu_mode: input_ids = input_ids.int().cuda() - + # Set the binding shape of input_ids, which should be (bs, input_length). if not self.context_mode: self.bindings[0] = input_ids.int().data_ptr() @@ -269,7 +269,7 @@ def forward(self, input_ids, *args, **kwargs): self.bindings[self.num_bindings] = input_ids.int().data_ptr() self.context_trt_context.set_binding_shape(self.num_bindings, input_ids.shape) - if self.config.use_cache: + if self.config.use_cache: if self.context_mode: self.past_decoder_length = 0 @@ -284,7 +284,7 @@ def forward(self, input_ids, *args, **kwargs): # Optimization Profile 0 is context phase with kv inputs self.context_trt_context.set_binding_shape(self.kv_cache_binding_offset+2*i + self.num_bindings, self_attention_kv_shape) self.context_trt_context.set_binding_shape(self.kv_cache_binding_offset+2*i + 1 + self.num_bindings, self_attention_kv_shape) - + # Launch TRT inference. 
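The `_switch_input_output_binding` scheme above is easiest to see with a toy sketch: two persistent device buffers per cache tensor, with the input and output binding pointers exchanged after every step so the engine never reads and writes the same allocation. Names and sizes below are made up for illustration.

```python
import torch

buf_a = torch.zeros(8, device="cuda")  # stands in for KV-cache buffer 1
buf_b = torch.zeros(8, device="cuda")  # stands in for KV-cache buffer 2

# "past_*" is the engine input pointer, "present_*" the engine output pointer.
bindings = {"past_key": buf_a.data_ptr(), "present_key": buf_b.data_ptr()}

def switch_input_output_binding(bindings):
    # After a decoding step, what was just written ("present") becomes the next
    # step's "past" input, with no device-to-device copy.
    bindings["past_key"], bindings["present_key"] = bindings["present_key"], bindings["past_key"]

switch_input_output_binding(bindings)
assert bindings["past_key"] == buf_b.data_ptr()
```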
if not self.context_mode: assert self.trt_context.all_binding_shapes_specified @@ -292,7 +292,7 @@ def forward(self, input_ids, *args, **kwargs): else: assert self.context_trt_context.all_binding_shapes_specified self.context_trt_context.execute_v2(bindings=self.bindings) - + # For bs > 1, this is required, so cannnot avoid this D2D copy logits_length = bs * input_length * self.config.vocab_size logits = self.logits.flatten()[:logits_length].view(bs, input_length, self.config.vocab_size) @@ -306,7 +306,7 @@ def forward(self, input_ids, *args, **kwargs): present_key_values = () self_attention_cache = self.self_attention_cache_1 if self.use_cache_1_as_input or (self.profile_idx == 0) else self.self_attention_cache_2 - + for i in range(self.num_decoder_layers): self_attention_k_output = self_attention_cache[f"key_values.{i}.decoder.key"] @@ -316,7 +316,7 @@ def forward(self, input_ids, *args, **kwargs): self_attention_k_output = self_attention_k_output.cpu() self_attention_v_output = self_attention_v_output.cpu() - present_key_values += ((self_attention_k_output, self_attention_v_output),) + present_key_values += ((self_attention_k_output, self_attention_v_output),) self._switch_input_output_binding() return CausalLMOutputWithPast(logits=logits.to(self.return_device), past_key_values = present_key_values) @@ -344,6 +344,33 @@ def cleanup( self.frameworks_cmd.cleanup(workspace, keep_onnx_model, keep_torch_model) + def generate( + self, + input_ids, + min_length: int = None, + max_length: int = None, + num_beams: int = 1, + use_cache: bool = False, + early_stopping: bool = True, + ): + if max_length is None: + max_length = GPT2ModelTRTConfig.MAX_OUTPUT_LENGTH[self.metadata.variant] + + if min_length is None: + min_length = GPT2ModelTRTConfig.MIN_OUTPUT_LENGTH[self.metadata.variant] + + output = self.gpt2_trt.generate( + input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + use_cache=use_cache, + early_stopping=early_stopping + ) + + self.gpt2_trt.reset() + return output + def execute_inference( self, metadata: NetworkMetadata, @@ -379,7 +406,7 @@ def execute_inference( timing_profile, use_cache = metadata.other.kv_cache, ) - + # get complete decoder inference result and its timing profile sample_output, full_e2e_runtime = full_inference( self.gpt2_trt, @@ -439,6 +466,7 @@ def execute_calculate_perplexity( self, metadata: NetworkMetadata, reference: str, + batch_size: int, ): tokenizer = GPT2Tokenizer.from_pretrained(metadata.variant) @@ -446,7 +474,7 @@ def execute_calculate_perplexity( # replace with EOS token when using generating mode tokenizer.add_special_tokens({"pad_token": "[PAD]"}) reference = reference.replace("\\n", "\n") - ppl_input_ids = tokenizer([reference], padding=False, return_tensors="pt").input_ids + ppl_input_ids = tokenizer([reference] * batch_size, padding=False, return_tensors="pt").input_ids perplexity = calculate_perplexity( self.gpt2_trt, ppl_input_ids, GPT2ModelTRTConfig.MAX_LENGTH[metadata.variant] @@ -495,7 +523,7 @@ def _setup_engines( max_output_length = benchmarking_args.output_profile_max_len opt_input_seq_len = benchmarking_args.input_seq_len opt_output_seq_len = benchmarking_args.output_seq_len - + if not hf_config.use_cache: # If not using kv cache, only input_ids is passed decoder_profiles = [Profile().add( @@ -531,7 +559,7 @@ def _setup_engines( opt=(batch_size * num_beams, 1), max=(batch_size * num_beams, 1), ) - + self_attention_profile_generation = { "min": (batch_size * num_beams, num_heads, 1, embedding_size_per_head), 
"opt": (batch_size * num_beams, num_heads, opt_output_seq_len - 1, embedding_size_per_head), @@ -554,11 +582,11 @@ def _setup_engines( f"past_key_values.{i}.decoder.value", **self_attention_profile_generation ) - + # TensorRT accepts multiple optimization engines for the same model. # Profile 1 is only used in the first decoder iterations. decoder_profiles = [dec_profiles_generation, dec_profiles_context] - + # Convert ONNX models to TRT engines. if benchmarking_args is None: engine_tag = "bs{}".format(batch_size) @@ -572,12 +600,12 @@ def _setup_engines( if num_beams > 1: engine_tag += "-beam{}".format(num_beams) - preview_features = [] + preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805] if disable_preview_dynamic_shapes: engine_tag += "-noPreviewFasterDynamicShapes" else: preview_features.append(PreviewFeature.FASTER_DYNAMIC_SHAPES_0805) - + self.gpt2_trt_engine = GPT2ONNXFile( decoder_onnx_fpath, metadata ).as_trt_engine( @@ -606,15 +634,11 @@ def run_trt( perplexity_reference: List[str] = None, ) -> Union[List[NetworkResult], BenchmarkingResult]: - workspace = NNFolderWorkspace( - self.frameworks_cmd.config.network_name, metadata, working_directory - ) + workspace = self._setup_workspace(metadata, working_directory) # no fpath provided for onnx files, download them if len(onnx_fpaths) == 0: - onnx_fpaths = self.frameworks_cmd.generate_and_download_framework( - metadata, workspace - ).onnx + onnx_fpaths = self._download_models(workspace, metadata) else: keep_onnx_model = True keep_torch_model = True @@ -642,7 +666,7 @@ def run_trt( else: for r in perplexity_reference: ppl_results.append( - self.execute_calculate_perplexity(metadata, r) + self.execute_calculate_perplexity(metadata, r, batch_size) ) else: hf_config = AutoConfig.from_pretrained(metadata.variant, use_cache = metadata.other.kv_cache) diff --git a/demo/HuggingFace/NNDF/checkpoints.py b/demo/HuggingFace/NNDF/checkpoints.py index c690a314..3c94ea32 100644 --- a/demo/HuggingFace/NNDF/checkpoints.py +++ b/demo/HuggingFace/NNDF/checkpoints.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/HuggingFace/NNDF/cuda_bootstrapper.py b/demo/HuggingFace/NNDF/cuda_bootstrapper.py new file mode 100644 index 00000000..e9fdb26d --- /dev/null +++ b/demo/HuggingFace/NNDF/cuda_bootstrapper.py @@ -0,0 +1,101 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Holds logic for modifying and removing invalid CUDA libraries in LD_LIBRARY_PATH. + +Users may have CUDA libraries in LD_LIBRARY_PATH which causes issues with Torch cublas. +This problem only occurs on Linux. 
+See: + https://github.com/pytorch/pytorch/issues/94294 + https://github.com/pytorch/pytorch/issues/64097 +""" + +import os +import sys +import glob +import shutil + +import subprocess as sp +from NNDF.logger import G_LOGGER + +def bootstrap_ld_library_path() -> bool: + """ + Modifies the LD_LIBRARY_PATH if applicable and then spawns a child process + using first "poetry" and then "python3"/"python" if "poetry" fails. + """ + if os.environ.get("TRT_OSS_DISABLE_BOOTSTRAP") or "linux" not in sys.platform: + return False + + # Walk through each path in environment to see if there are cublas libraries being loaded. + paths = os.environ.get("LD_LIBRARY_PATH", "").split(os.pathsep) + new_paths = [] + modified_path = False + for path in paths: + for lib in ("cublas", "cudart", "cublasLt"): + g = glob.glob(os.path.join(path, f"lib{lib}.so.*")) + if g: + modified_path = True + G_LOGGER.warning(f"Discarding `{path}` from LD_LIBRARY_PATH since it contains CUDA libraries.") + break + else: + new_paths.append(path) + + + if not modified_path: + return False + else: + warning_msg = ("Attempting to bootstrap altered LD_LIBRARY_PATH. " + "\nYou can disable this with TRT_OSS_DISABLE_BOOTSTRAP=1 however frameworks performance may be impacted. " + "\nThere are known issues with cuBLAS loading and PyTorch compatability " + "that is still being resolved for most CUDA <= 12.1 and Torch setups. See: " + "\n - https://github.com/pytorch/pytorch/issues/94294" + "\n - https://github.com/pytorch/pytorch/issues/64097\n") + G_LOGGER.warning(warning_msg) + + G_LOGGER.info(f"CUDA detected in path. Restarting scripts with modified LD_LIBRARY_PATH: {new_paths}") + os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(new_paths) + # To prevent potential recursion, we add one more modification just in case. + os.environ["TRT_OSS_DISABLE_BOOTSTRAP"] = "1" + + # Spawn a new child process instead. + try: + # Use the same python exe that invoked this script + default_python = sys.executable + + # Demo supports both poetry and python3 invocation. + # Check if poetry works first. + cmd = [default_python] + list(sys.argv) + if shutil.which("poetry") is not None: + poetry_cmd = ["poetry", "run"] + cmd + + # Poetry command will be tried. If it fails, we ignore the error and fallback to default python. + try: + # Instantiate a secondary child process. + sp.check_call(" ".join(poetry_cmd), env=dict(os.environ), cwd=os.getcwd(), shell=True) + return True + except: + pass + + # Default python fallback. + sp.check_call(" ".join(cmd), env=dict(os.environ), cwd=os.getcwd(), shell=True) + except Exception as e: + G_LOGGER.error("Unable to start a new process with modified LD_LIBRARY_PATH. Consider removing CUDA lib in LD_LIBRARY_PATH manually.") + G_LOGGER.error(str(e)) + G_LOGGER.warning("Attempting to continue with demo.") + + return True diff --git a/demo/HuggingFace/NNDF/general_utils.py b/demo/HuggingFace/NNDF/general_utils.py index e606c143..f8fb9897 100644 --- a/demo/HuggingFace/NNDF/general_utils.py +++ b/demo/HuggingFace/NNDF/general_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +16,8 @@ # """Common utils used by demo folder. 
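For reference, the intended call pattern for the bootstrapper above is a single early check in the entry-point script, mirroring the `run.py` change later in this patch; the `main()` scaffolding below is a hedged sketch, not the demo's actual entry point.

```python
# Hedged sketch of wiring bootstrap_ld_library_path() into an entry point.
# The import matches the new NNDF module; main() itself is illustrative.
import sys

from NNDF.cuda_bootstrapper import bootstrap_ld_library_path


def main() -> None:
    # If a child process was spawned with a cleaned LD_LIBRARY_PATH, it has
    # already completed the rest of the demo, so the parent exits immediately.
    if bootstrap_ld_library_path():
        sys.exit(0)
    # ... continue with argument parsing and the normal demo flow ...


if __name__ == "__main__":
    main()
```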
-Note: -- For now, users/developers that are contributing to TensorRT OSS should NOT import non-default Python packages in this file, because the test pipeline's boot-up process cannot load extra dependencies. In the near future, alternative solutions such as creating a separate boot-up util list can be possible. +Note: +- For now, users/developers that are contributing to TensorRT OSS should NOT import non-default Python packages in this file, because the test pipeline's boot-up process cannot load extra dependencies. In the near future, alternative solutions such as creating a separate boot-up util list can be possible. - Users/developers that are just using the TensorRT OSS without contributing are still free to modify this file and customize for deployment. """ @@ -170,7 +170,7 @@ def simple_percentile(data, p): Temporary replacement for numpy.percentile() because TRT CI/CD pipeline requires additional packages to be added at boot up in this general_utils.py file. """ assert p >= 0 and p <= 100, "Percentile must be between 1 and 99" - + rank = len(data) * p / 100 if rank.is_integer(): return sorted(data)[int(rank)] @@ -229,7 +229,7 @@ def set_model_path(self, metadata_serialized, is_encoder_decoder: bool) -> str: os.makedirs(self.decoder_path, exist_ok=True) if is_encoder_decoder: self.encoder_path = os.path.join(self.model_path, "encoder") - os.makedirs(self.encoder_path, exist_ok=True) + os.makedirs(self.encoder_path, exist_ok=True) # For decoder only models, there is no encoder else: self.encoder_path = None @@ -242,19 +242,19 @@ def set_model_path(self, metadata_serialized, is_encoder_decoder: bool) -> str: os.makedirs(self.decoder_kv_path, exist_ok=True) return self.model_path, self.encoder_path, self.decoder_path - + def get_path(self) -> str: return self.dpath - + def get_model_path(self) -> str: return self.model_path - + def get_encoder_path(self) -> str: return self.encoder_path - + def get_decoder_path(self) -> str: return self.decoder_path - + def get_decoder_path_kv(self) -> (str, str): if not self.metadata.other.kv_cache: raise RuntimeError("Trying to access kv specific folder in non kv mode") @@ -267,7 +267,7 @@ def cleanup(self, force_remove: bool = False) -> None: ''' if force_remove: return shutil.rmtree(self.dpath) - + if self.is_encoder_decoder_path_set: if self.encoder_path is not None: remove_if_empty(self.encoder_path) @@ -281,6 +281,6 @@ def cleanup(self, force_remove: bool = False) -> None: remove_if_empty( self.decoder_path ) - + remove_if_empty(self.model_path) remove_if_empty(self.dpath) diff --git a/demo/HuggingFace/NNDF/interface.py b/demo/HuggingFace/NNDF/interface.py index 9635fcbd..8d1a739c 100644 --- a/demo/HuggingFace/NNDF/interface.py +++ b/demo/HuggingFace/NNDF/interface.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,6 +35,7 @@ TimingProfile, ) from NNDF.logger import G_LOGGER +from NNDF.general_utils import NNFolderWorkspace # externals # None, there should be no external dependencies for testing purposes. 
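As a quick aside on the `simple_percentile` helper retouched in `general_utils.py` above, here is a minimal usage sketch; the latency values are made up for illustration.

```python
# Illustrative use of the dependency-free percentile helper from NNDF.general_utils.
# The latency values below are made up for demonstration only.
from NNDF.general_utils import simple_percentile

latencies_ms = [12.1, 11.8, 13.0, 12.4, 12.7, 11.9, 12.2, 12.5]
p50 = simple_percentile(latencies_ms, 50)
p99 = simple_percentile(latencies_ms, 99)
print(f"p50={p50:.2f} ms, p99={p99:.2f} ms")
```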
@@ -322,6 +323,21 @@ def __init__( # Should be set by self.frameworks_cmd = frameworks_cmd() + def _setup_workspace(self, metadata: NetworkMetadata, working_directory: str) -> NNFolderWorkspace: + return NNFolderWorkspace( + self.frameworks_cmd.config.network_name, metadata, working_directory + ) + + def _download_models( + self, + workspace: NNFolderWorkspace, + metadata: NetworkMetadata, + ) -> Tuple[NetworkModel]: + # No fpath provided for onnx files, download them from HuggingFace repo. + return self.frameworks_cmd.generate_and_download_framework( + metadata, workspace + ).onnx + @abstractmethod def run_trt( self, diff --git a/demo/HuggingFace/NNDF/logger.py b/demo/HuggingFace/NNDF/logger.py index 396280c9..220394b7 100644 --- a/demo/HuggingFace/NNDF/logger.py +++ b/demo/HuggingFace/NNDF/logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/HuggingFace/NNDF/models.py b/demo/HuggingFace/NNDF/models.py index dc2dd851..8a51392b 100644 --- a/demo/HuggingFace/NNDF/models.py +++ b/demo/HuggingFace/NNDF/models.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -422,7 +422,7 @@ def as_torch_model( return converter.torch_class(output_fpath, self.network_metadata) return converter.onnx_to_torch(output_fpath, self.fpath, self.network_metadata) - + def _cleanup_onnx_folder(self, folder_dir): for d in os.listdir(folder_dir): fpath = os.path.join(folder_dir, d) diff --git a/demo/HuggingFace/NNDF/networks.py b/demo/HuggingFace/NNDF/networks.py index 9f21fac1..ff8700fc 100644 --- a/demo/HuggingFace/NNDF/networks.py +++ b/demo/HuggingFace/NNDF/networks.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/HuggingFace/NNDF/tensorrt_utils.py b/demo/HuggingFace/NNDF/tensorrt_utils.py index 702d213d..74226ae8 100644 --- a/demo/HuggingFace/NNDF/tensorrt_utils.py +++ b/demo/HuggingFace/NNDF/tensorrt_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,7 @@ from typing import Dict, List from functools import reduce +from enum import Enum # polygraphy from polygraphy.backend.trt import engine_from_bytes, TrtRunner @@ -28,6 +29,7 @@ # tensorrt import tensorrt as trt +import os # ONNX import onnx @@ -70,21 +72,19 @@ def set_kv_data(kv_dict, past_or_present, layer_id, segment_value_dict): kv_dict: Dict[str, tuple/torch.dtype], the dict to modify within the function past_or_present: str, either "past" or "present" layer_id: int, need kv cache for each decoder layer - segment_value_dict: Dict[str, tuple/torch.dtype], example: + segment_value_dict: Dict[str, tuple/torch.dtype], example: kvcache type: {"encoder": torch.float32, "decoder": torch.float32} kvcache shape: {"encoder": cross_attention_kv_shape, "decoder": self_attention_kv_shape} ''' for segment, value in segment_value_dict.items(): for code in ['key', 'value']: kv_dict[f"{past_or_present}_key_values.{layer_id}.{segment}.{code}"] = value - -def clamp_weights_onnx(onnx_input_fpath: str, onnx_output_fpath: str, min: float, max: float, ignore_nodes: List = None): +def clamp_weights_onnx(graph, min: float, max: float, ignore_nodes: List = None): """ Clamps given onnx model to targeted upper and lower bounds. """ - graph = gs.import_onnx(onnx.load(onnx_input_fpath)) if ignore_nodes is None: ignore_nodes = {} else: @@ -103,24 +103,22 @@ def clamp_weights_onnx(onnx_input_fpath: str, onnx_output_fpath: str, min: float if node_attr is not None: np.clip(node_attr.values, min, max, out=node_attr.values) - - model = gs.export_onnx(graph) - onnx.save(model, onnx_output_fpath, save_as_external_data=False) + + return graph -def clamp_weights_onnx_to_fp16_bounds(onnx_input_fpath: str, onnx_output_fpath: str, ignore_nodes: List = None): +def clamp_weights_onnx_to_fp16_bounds(graph, ignore_nodes: List = None): upper_bound = 65504 - return clamp_weights_onnx(onnx_input_fpath, onnx_output_fpath, -upper_bound, upper_bound, ignore_nodes) + return clamp_weights_onnx(graph, -upper_bound, upper_bound, ignore_nodes) -def move_t5_cast_op(onnx_input_fpath: str, onnx_output_fpath: str): +def move_t5_cast_op(graph): """ T5 encoder and decoder have cast ops after residual add operation. Moving the cast operation before add helps with FP16 accuracy as addition operation can cause overflow in FP16. 
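The graph-level refactor above (clamping and cast motion now operate on an in-memory `gs.Graph`) is consumed by a consolidated `process_onnx` entry point introduced just below; here is a hedged usage sketch, mirroring how `T5/export.py` later in this patch calls it in place. The file path is a placeholder.

```python
# Hedged usage sketch of the consolidated single-pass ONNX post-processing API.
# The path is a placeholder; T5/export.py calls it in-place like this after export.
from NNDF.tensorrt_utils import OnnxProcessOperation, process_onnx

process_onnx(
    [OnnxProcessOperation.MOVE_CAST_OP, OnnxProcessOperation.CLAMP_WEIGHTS],
    "t5-decoder-with-lm-head.onnx",   # onnx_input_fpath (placeholder)
    "t5-decoder-with-lm-head.onnx",   # onnx_output_fpath: rewritten in place
)
```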
""" - graph = gs.import_onnx(onnx.load(onnx_input_fpath)) cast_nodes = [node for node in graph.nodes if node.op == "Cast"] # Version check for backward compatibility torch_version_major = int(torch.__version__.split('.')[0]) @@ -180,13 +178,47 @@ def move_t5_cast_op(onnx_input_fpath: str, onnx_output_fpath: str): n.inputs = outs graph.cleanup().toposort() + return graph + +# The current operations would require loading/unloading onnx files twice, +class OnnxProcessOperation(Enum): + CLAMP_WEIGHTS = 1 + MOVE_CAST_OP = 2 + +def process_onnx(config: List[OnnxProcessOperation], onnx_input_fpath, onnx_output_fpath, keep_input = False, **kwargs): + graph = gs.import_onnx(onnx.load(onnx_input_fpath)) + folder = os.path.split(onnx_input_fpath)[0] + for op in config: + if op == OnnxProcessOperation.CLAMP_WEIGHTS: + graph = clamp_weights_onnx_to_fp16_bounds(graph, **kwargs) + elif op == OnnxProcessOperation.MOVE_CAST_OP: + graph = move_t5_cast_op(graph) + model = gs.export_onnx(graph) - onnx.save(model, onnx_output_fpath, save_as_external_data=False) + folder = os.path.split(onnx_input_fpath)[0] + model_size = 0 + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + model_size += os.stat(file_path).st_size + if not keep_input: + os.unlink(file_path) + + except Exception as e: + print('Failed to delete %s. Reason: %s' % (file_path, e)) + + # Save the weights as external data only when model > 2GB + if model_size >= 1.8 * 1024 * 1024 * 1024: + onnx.save_model(model, onnx_output_fpath, save_as_external_data=True, all_tensors_to_one_file = False, convert_attribute=False) + else: + onnx.save_model(model, onnx_output_fpath, save_as_external_data=False) # Helper Classes class TRTNativeRunner: """TRTNativeRunner avoids the high overheads with Polygraphy runner providing performance comparable to C++ implementation.""" def __init__(self, trt_engine_file: TRTEngineFile, network_metadata: NetworkMetadata): + self.network_metadata = network_metadata self.trt_engine_file = trt_engine_file self.trt_logger = trt.Logger() diff --git a/demo/HuggingFace/NNDF/torch_utils.py b/demo/HuggingFace/NNDF/torch_utils.py index e8569e3f..f3b2fadc 100644 --- a/demo/HuggingFace/NNDF/torch_utils.py +++ b/demo/HuggingFace/NNDF/torch_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/HuggingFace/README.md b/demo/HuggingFace/README.md index a025ee1a..cbc8e9c2 100644 --- a/demo/HuggingFace/README.md +++ b/demo/HuggingFace/README.md @@ -18,11 +18,13 @@ Currently, this repository supports the following models: ## Setup + Follow the setup steps in the TensorRT OSS repository. It is recommended to experiment inside Docker container. For a smoother setup experience, it is recommended to use [Poetry](https://python-poetry.org/) to install requirements and execute: ```bash poetry install # one-time setup +poetry add # see top level repo README.md on how to get TensorRT wheels. poetry run python run.py # execute program ``` @@ -120,7 +122,7 @@ Notes: ## How to run with K-V cache -For all the models (GPT2/BART/T5), use `--enable-kv-cache` option to get the same effect of HuggingFace's `use_cache` option. 
For encoder-decoder models, this option will use key & value cache in decoder for uni-directional self-attention and encoder-decoder cross-attention. KV cache could reduce the size of `input_ids` and improve runtime performance when `input_ids` is long. Current benchmarking result shows that at `input_seq_len = 1024` and `output_seq_len = 1024`, t5-large model with kv cache could achieve 3x faster than without kv cache in single NVIDIA A100 GPU. +For all the models (GPT2/BART/T5), use `--enable-kv-cache` option to get the same effect of HuggingFace's `use_cache` option. For encoder-decoder models, this option will use key & value cache in decoder for uni-directional self-attention and encoder-decoder cross-attention. KV cache could reduce the size of `input_ids` and improve runtime performance when `input_ids` is long. Current benchmarking result shows that at `input_seq_len = 1024` and `output_seq_len = 1024`, t5-large model with kv cache could achieve 3x faster than without kv cache in single NVIDIA A100 GPU. ```python python3 run.py run BART [frameworks | trt] --variant facebook/bart-base --working-dir temp --enable-kv-cache @@ -131,7 +133,7 @@ Notes: * For BART, we will be porting similar optimization from T5, but currently, K-V cache decoder with TensorRT requires exporting 2 onnx files and building separate engines respectively, called "non-kv" and "kv". For the first decoder run, KV Cache needs to be generated with only `input_ids` and `encoder_hidden_states`(if encoder_decoder), which is named "non-kv". For the other decoder iterations, previous KV Cache and other inputs are passed into the model to generate the updated KV Cache and decoder_hidden_states, which is named "kv". Because current onnx export cannot handle dynamic number of inputs, 2 onnx files with slightly different configurations are used together. -* For GPT2, since it is decoder only, only self attention kv is needed, and it has 2 mode, corresonding to 2 optimization profiles for a single TensorRT engine: context mode which takes in `input_ids` with various length only and outputs `hidden_states` and self attention cache; generation mode, which takes in `input_ids` with seq_len = 1 and entire self attention kv cache, and outputs `hidden_states` with seq_len = 1 and kv cache with cum_seq_len (`past_decoder_length`) + 1. It has some memory concurrency issue that cannot let self attention input and output point to the same memory location, so it requires dual cache. +* For GPT2, since it is decoder only, only self attention kv is needed, and it has 2 mode, corresonding to 2 optimization profiles for a single TensorRT engine: context mode which takes in `input_ids` with various length only and outputs `hidden_states` and self attention cache; generation mode, which takes in `input_ids` with seq_len = 1 and entire self attention kv cache, and outputs `hidden_states` with seq_len = 1 and kv cache with cum_seq_len (`past_decoder_length`) + 1. It has some memory concurrency issue that cannot let self attention input and output point to the same memory location, so it requires dual cache. ## How to run with beam search @@ -179,3 +181,15 @@ pytest ``` It is recommended to use Pytest `4.6.x`. Your Python environment must have already had the setup completed. 
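To make the kv-cache and beam-search options described above concrete, here is a hedged sketch of driving the TRT-backed GPT2 through the HuggingFace-style `generate()` wrapper added earlier in this patch. `gpt2_trt_cmd` is a placeholder for an already-initialized GPT2 TRT command object; only the keyword arguments reflect the wrapper's actual signature.

```python
# Hedged sketch: kv cache + beam search through the generate() wrapper added above.
# `gpt2_trt_cmd` stands in for an initialized GPT2 TRT command object (placeholder).
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
input_ids = tokenizer("TensorRT accelerates", return_tensors="pt").input_ids.to("cuda")

output_ids = gpt2_trt_cmd.generate(
    input_ids,
    max_length=64,
    num_beams=4,        # beam search, as described above
    use_cache=True,     # same effect as passing --enable-kv-cache on the CLI
    early_stopping=True,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```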
+ + +## Troubleshooting + +### cuBLAS Errors + +``` +CUDA error: CUBLAS_STATUS_INVALID_VALUE when calling `cublasSgemm( handle, opa, opb, m, n, k, &alpha, a, lda, b, ldb, &beta, c, ldc)` +``` + +It is possible that your LD_LIBRARY_PATH has a competing CUDA version stored inside, causing PyTorch to read the incorrect library. +Consider modifying LD_LIBRARY_PATH and removing your CUDA path. diff --git a/demo/HuggingFace/T5/T5ModelConfig.py b/demo/HuggingFace/T5/T5ModelConfig.py index 2db90363..5490fb4b 100644 --- a/demo/HuggingFace/T5/T5ModelConfig.py +++ b/demo/HuggingFace/T5/T5ModelConfig.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -103,7 +103,7 @@ class T5ModelTRTConfig(NNConfig): TARGET_MODELS[3]: 4096, TARGET_MODELS[4]: 5120, } - + MAX_SEQUENCE_LENGTH = { TARGET_MODELS[0]: 512, TARGET_MODELS[1]: 768, @@ -139,7 +139,7 @@ class T5ModelTRTConfig(NNConfig): TARGET_MODELS[2]: 24, TARGET_MODELS[3]: 24, TARGET_MODELS[4]: 24, - } + } NETWORK_FULL_NAME = "full" NETWORK_DECODER_SEGMENT_NAME = "decoder" NETWORK_ENCODER_SEGMENT_NAME = "encoder" @@ -215,12 +215,12 @@ def get_input_dims(metadata) -> Dict: self_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("past_decoder_length"), "embedding_size_per_head") decoder_inputs_dict[f"past_key_values.{i}.decoder.key"] = self_attention_past_kv_dims decoder_inputs_dict[f"past_key_values.{i}.decoder.value"] = self_attention_past_kv_dims - + # encoder-decoder cross-attention KV cache (dim[0] & dim[2] are dynamic, but dim[2] is constant at each decoding timestep) - cross_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") + cross_attention_past_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") decoder_inputs_dict[f"past_key_values.{i}.encoder.key"] = cross_attention_past_kv_dims decoder_inputs_dict[f"past_key_values.{i}.encoder.value"] = cross_attention_past_kv_dims - + decoder_inputs = [Dims(context_inputs_dict), Dims(decoder_inputs_dict)] else: decoder_inputs_dict = OrderedDict( @@ -262,12 +262,12 @@ def get_output_dims(metadata) -> Dict: self_attention_present_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("past_decoder_length"), "embedding_size_per_head") decoder_outputs_dict[f"present_key_values.{i}.decoder.key"] = self_attention_present_kv_dims decoder_outputs_dict[f"present_key_values.{i}.decoder.value"] = self_attention_present_kv_dims - + # encoder-decoder cross-attention KV cache (dim[0] & dim[2] are dynamic, but dim[2] is constant at each decoding timestep) cross_attention_present_kv_dims = (Dims.BATCH, "num_heads", Dims.create_new_sequence_dim("encoder_length"), "embedding_size_per_head") context_outputs_dict[f"present_key_values.{i}.encoder.key"] = cross_attention_present_kv_dims context_outputs_dict[f"present_key_values.{i}.encoder.value"] = cross_attention_present_kv_dims - + decoder_outputs = [Dims(context_outputs_dict), Dims(decoder_outputs_dict)] else: decoder_outputs_dict = OrderedDict( diff --git a/demo/HuggingFace/T5/export.py b/demo/HuggingFace/T5/export.py index 5a8c8bc8..63b7a73e 100644 --- a/demo/HuggingFace/T5/export.py +++ b/demo/HuggingFace/T5/export.py @@ -1,5 
+1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,7 @@ # TRT-HuggingFace from T5.T5ModelConfig import T5ModelTRTConfig -from NNDF.tensorrt_utils import clamp_weights_onnx_to_fp16_bounds, move_t5_cast_op +from NNDF.tensorrt_utils import OnnxProcessOperation, process_onnx from NNDF.networks import NetworkMetadata, Precision, Dims from NNDF.logger import G_LOGGER from NNDF.models import ( @@ -123,17 +123,18 @@ def __init__(self, decoder, lm_head, config, is_trt = False): self.decoder = decoder self.lm_head = lm_head self.config = config - self.device = "cuda" # HuggingFace's beam search requires to set self.device. Set it to avoid application crash + # HuggingFace's beam search requires to set self.device. Set it to avoid application crash + self.device = torch.device('cuda') # Use hardcoded value to extend compatibility with older HF versions. self.main_input_name = "input_ids" # trt uses cached and precomputed cross attention vs. framework uses the entire kv cache as output. Need to treat them differently. self.is_trt = is_trt def prepare_inputs_for_generation( - self, - input_ids, - past=None, - use_cache=None, + self, + input_ids, + past=None, + use_cache=None, **kwargs ): # cut decoder_input_ids if past is used @@ -148,10 +149,10 @@ def prepare_inputs_for_generation( } def forward( - self, - input_ids, - encoder_hidden_states, - use_cache = None, + self, + input_ids, + encoder_hidden_states, + use_cache = None, past_key_values = None, return_dict = None, **kwargs, @@ -181,9 +182,9 @@ def forward( if not return_dict: return (logits, past_key_values) - + return Seq2SeqLMOutput( - logits=logits, + logits=logits, past_key_values=past_key_values ) @@ -206,16 +207,16 @@ def forward(self, encoder_hidden_states): dummy_hidden_states = torch.zeros(1,1).to(self.device) dummy_position_bias = torch.zeros(1, layer_module.layer[1].EncDecAttention.n_heads, 1, encoder_hidden_states.shape[1]).to(self.device) cross_attention_outputs = layer_module.layer[1]( - hidden_states=dummy_hidden_states, - key_value_states=encoder_hidden_states, - use_cache=True, + hidden_states=dummy_hidden_states, + key_value_states=encoder_hidden_states, + use_cache=True, past_key_value=None, position_bias=dummy_position_bias ) present_key_values = present_key_values + cross_attention_outputs[1] - + return present_key_values - + def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) @@ -256,9 +257,20 @@ class T5DecoderTRTEngine(TRTEngineFile): def __init__(self, model, network_metadata): super().__init__(model, T5DecoderConverter, network_metadata) self.max_trt_workspace = T5ModelTRTConfig.MAX_DECODER_WORKSPACE_MB[network_metadata.variant] - + def get_network_definition(self, network_definition): + if self.network_metadata.precision.fp16: + for i in range(network_definition[1].num_inputs): + t = network_definition[1].get_input(i) + if t.dtype == trt.float32: + t.dtype = trt.float16 + + for i in range(network_definition[1].num_outputs): + t = network_definition[1].get_output(i) + if t.dtype == trt.float32: + t.dtype = trt.float16 + return add_extra_fp32(network_definition) def use_obey_precision_constraints(self): @@ -393,11 +405,10 @@ def _export_forward(input_ids, encoder_hidden_states, past_key_values): ) if 
network_metadata.precision.fp16: - clamp_weights_onnx_to_fp16_bounds(output_fpath_kv_generator, output_fpath_kv_generator) + process_onnx([OnnxProcessOperation.CLAMP_WEIGHTS], output_fpath_kv_generator, output_fpath_kv_generator) if network_metadata.precision.fp16: - move_t5_cast_op(output_fpath, output_fpath) - clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath) + process_onnx([OnnxProcessOperation.MOVE_CAST_OP, OnnxProcessOperation.CLAMP_WEIGHTS], output_fpath, output_fpath) return T5DecoderONNXFile(output_fpath, network_metadata) @@ -467,7 +478,6 @@ def torch_to_onnx( ) if network_metadata.precision.fp16: - move_t5_cast_op(output_fpath, output_fpath) - clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath) + process_onnx([OnnxProcessOperation.MOVE_CAST_OP, OnnxProcessOperation.CLAMP_WEIGHTS], output_fpath, output_fpath) return T5EncoderONNXFile(output_fpath, network_metadata) diff --git a/demo/HuggingFace/T5/frameworks.py b/demo/HuggingFace/T5/frameworks.py index c98e4f9b..2f06128d 100644 --- a/demo/HuggingFace/T5/frameworks.py +++ b/demo/HuggingFace/T5/frameworks.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -91,8 +91,8 @@ def generate_and_download_framework( pytorch_model_dir, use_cache = metadata.other.kv_cache ) - - # These ONNX models can be converted using special encoder and decoder classes. + + # These ONNX models can be converted using special encoder and decoder classes. encoder_onnx_model_fpath = os.path.join(encoder_onnx_root, metadata_serialized + "-encoder.onnx") decoder_onnx_model_fpath = os.path.join(decoder_onnx_root, metadata_serialized + "-decoder-with-lm-head.onnx") @@ -162,10 +162,8 @@ def setup_tokenizer_and_model( # By default, huggingface model structure is one giant file. t5_torch_fpath = network_fpaths.torch[0].fpath t5_model = T5ForConditionalGeneration.from_pretrained(t5_torch_fpath, use_cache=metadata.other.kv_cache) - # Framework fp16 does not support cpu mode for T5 - # TODO: Enable true frameworks fp16. CUDA 11.4 so far does not support model.half() for PyTorch 1.13. - # if metadata.precision.fp16: - # t5_model = t5_model.cuda().half() + if metadata.precision.fp16: + t5_model = t5_model.cuda().half() t5_torch_encoder = T5EncoderTorchFile.TorchModule(t5_model.encoder) t5_torch_decoder = T5DecoderTorchFile.TorchModule( @@ -203,7 +201,7 @@ def execute_inference( t5_torch_encoder, input_ids, timing_profile, use_cuda=(not use_cpu) ) - # Need to feed the decoder a new empty input_ids for text generation. + # Need to feed the decoder a new empty input_ids for text generation. decoder_output_len = output_seq_len // 2 if (not metadata.other.kv_cache) else 1 decoder_input_ids = torch.full( diff --git a/demo/HuggingFace/T5/measurements.py b/demo/HuggingFace/T5/measurements.py index 3f4a8043..3b30e8c1 100644 --- a/demo/HuggingFace/T5/measurements.py +++ b/demo/HuggingFace/T5/measurements.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,7 +41,7 @@ def decoder_inference( def decoder_stmt(): t5_decoder( - input_ids=input_ids, encoder_hidden_states=encoder_last_hidden_state, use_cache=use_cache, + input_ids=input_ids, encoder_hidden_states=encoder_last_hidden_state, use_cache=use_cache, past_key_values=past_key_values ) @@ -90,7 +90,7 @@ def _e2e(): encoder_outputs = BaseModelOutput(last_hidden_state = encoder_last_hidden_state), ) return decoder_output - + if isinstance(t5_decoder, TRTNativeRunner): t5_decoder.set_return_device("cuda" if use_cuda else "cpu") diff --git a/demo/HuggingFace/T5/onnxrt.py b/demo/HuggingFace/T5/onnxrt.py index f33b251d..499ba2a5 100644 --- a/demo/HuggingFace/T5/onnxrt.py +++ b/demo/HuggingFace/T5/onnxrt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -146,7 +146,7 @@ def execute_inference( self.t5_ort_encoder, input_ids, timing_profile ) - # Need to feed the decoder a new empty input_ids for text generation. + # Need to feed the decoder a new empty input_ids for text generation. decoder_output_len = output_seq_len // 2 decoder_input_ids = torch.full( diff --git a/demo/HuggingFace/T5/trt.py b/demo/HuggingFace/T5/trt.py index 3f61d41b..3a2decc2 100644 --- a/demo/HuggingFace/T5/trt.py +++ b/demo/HuggingFace/T5/trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -129,7 +129,7 @@ def __init__( # In benchmarking mode, the max_sequence_length should be the designated input_profile_max_len if benchmarking_args is not None and benchmarking_args.input_profile_max_len is not None: self.max_sequence_length = benchmarking_args.input_profile_max_len - else: + else: self.max_sequence_length = hf_config.d_model self.encoder_hidden_size = hf_config.d_model self.main_input_name = "input_ids" @@ -201,20 +201,21 @@ def __init__( benchmarking_args: T5TRTBenchmarkingArgs = None, ): super().__init__(trt_engine_file, network_metadata, hf_config, batch_size = batch_size) - self.data_type = torch.float32 + self.data_type = torch.float32 if not network_metadata.precision.fp16 else torch.float16 # In benchmarking mode, the max_sequence_length should be the user-provided input_profile_max_len if benchmarking_args is not None and benchmarking_args.input_profile_max_len is not None: self.max_input_length = benchmarking_args.input_profile_max_len - else: + else: self.max_input_length = hf_config.d_model - + # Similarly, the max_output_length should be the user-provided output_profile_max_len if benchmarking_args is not None and benchmarking_args.output_profile_max_len is not None: self.max_output_length = benchmarking_args.output_profile_max_len - else: + else: self.max_output_length = hf_config.d_model - + + self.device = torch.device('cuda') self.main_input_name = "input_ids" self.encoder_hidden_size = hf_config.d_model self.num_heads = hf_config.num_heads @@ -246,7 +247,7 @@ def __init__( input_idx = self.trt_engine.get_binding_index("past_" + self_attention_name) self.self_attention_cache[self_attention_name] = input_buffer self.bindings[input_idx] = input_buffer.data_ptr() - + output_idx = self.trt_engine.get_binding_index("present_" + self_attention_name) self.bindings[output_idx] = input_buffer.data_ptr() @@ -262,22 +263,21 @@ def __init__( # Optimization bit self.persist_encoder_hidden_states = False - self.encoder_hidden_states = None + self.encoder_hidden_states = torch.zeros((self.batch_size * num_beams * self.max_input_length * self.encoder_hidden_size), dtype=self.data_type).cuda() + self.bindings[1] = self.encoder_hidden_states.data_ptr() self.persist_cross_attention_kv_cache = False - self.return_device = "cuda" + self.return_device = torch.device('cuda') self.variant = network_metadata.variant # record variant name to later index the vocab_size in forward() - + def set_encoder_hidden_states_for_inference_cycle(self, encoder_hidden_states): """Used to cache encoder hidden state runs across same encoder sessions""" - if encoder_hidden_states.device == torch.device("cpu"): - self.encoder_hidden_states = encoder_hidden_states.cuda() - else: - self.encoder_hidden_states = encoder_hidden_states - - self.bindings[1] = self.encoder_hidden_states.data_ptr() + # Use in-place assignment so that the memory location of self.encoder_hidden_states will never change. + # PyTorch will handle the FP32->FP16 conversion automatically if that is needed. 
+ self.encoder_hidden_states[:encoder_hidden_states.numel()] = encoder_hidden_states.flatten() self.persist_encoder_hidden_states = True + self.trt_context.set_binding_shape(1, encoder_hidden_states.shape) def set_cross_attention_kv_cache_engine(self, cross_attention_kv_generator): self.cross_attention_kv_generator = cross_attention_kv_generator @@ -285,6 +285,12 @@ def set_cross_attention_kv_cache_engine(self, cross_attention_kv_generator): trt_runtime = trt.Runtime(self.trt_logger) self.cross_attention_kv_generator_trt_engine = trt_runtime.deserialize_cuda_engine(f.read()) self.cross_attention_kv_generator_trt_context = self.cross_attention_kv_generator_trt_engine.create_execution_context() + self.cross_attention_bindings = [None] * self.cross_attention_kv_generator_trt_engine.num_bindings + self.cross_attention_bindings[0] = self.encoder_hidden_states.data_ptr() + # Cross attention cache as outputs + for i in range(self.num_decoder_layers): + self.cross_attention_bindings[2*i+1] = self.cross_attention_cache[f"past_key_values.{i}.encoder.key"].data_ptr() + self.cross_attention_bindings[2*i+2] = self.cross_attention_cache[f"past_key_values.{i}.encoder.value"].data_ptr() def set_cross_attention_kv_cache_for_inference_cycle(self, encoder_hidden_states): """ @@ -293,17 +299,8 @@ def set_cross_attention_kv_cache_for_inference_cycle(self, encoder_hidden_states Unlike self-attention cache, cross attention is constant during the decoding process, so we only need to set its bindings once at the first decoding step, and skip in all later steps (by self.persist_cross_attention_kv_cache flag) """ self.cross_attention_kv_generator_trt_context.set_binding_shape(0, encoder_hidden_states.shape) - bindings = [None] * self.cross_attention_kv_generator_trt_engine.num_bindings - bindings[0] = encoder_hidden_states.data_ptr() assert self.cross_attention_kv_generator_trt_context.all_binding_shapes_specified - - cross_attention_kv_shape_output = (encoder_hidden_states.shape[0], self.num_heads, self.max_input_length, self.embedding_size_per_head) - # Cross attention cache as outputs - for i in range(self.num_decoder_layers): - bindings[2*i+1] = self.cross_attention_cache[f"past_key_values.{i}.encoder.key"].data_ptr() - bindings[2*i+2] = self.cross_attention_cache[f"past_key_values.{i}.encoder.value"].data_ptr() - - self.cross_attention_kv_generator_trt_context.execute_v2(bindings=bindings) + self.cross_attention_kv_generator_trt_context.execute_v2(bindings=self.cross_attention_bindings) self.persist_cross_attention_kv_cache = True def set_return_device(self, return_device): @@ -343,7 +340,7 @@ def _reorder_cache(self, past, beam_idx): reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past - def forward(self, input_ids, encoder_hidden_states, *args, **kwargs): + def forward(self, input_ids, encoder_hidden_states, encoder_outputs=None, *args, **kwargs): # Get the batch size. 
bs = input_ids.shape[0] # in beam search mode, bs is batch_size * num_beams @@ -366,8 +363,6 @@ def forward(self, input_ids, encoder_hidden_states, *args, **kwargs): if not self.persist_encoder_hidden_states: self.set_encoder_hidden_states_for_inference_cycle(encoder_hidden_states) - self.trt_context.set_binding_shape(1, self.encoder_hidden_states.shape) - if self.config.use_cache: if (kwargs.get("past_key_values") is None): self.past_decoder_length = 0 @@ -388,7 +383,9 @@ def forward(self, input_ids, encoder_hidden_states, *args, **kwargs): assert self.trt_context.all_binding_shapes_specified self.trt_context.execute_v2(bindings=self.bindings) - logits = self.hidden_states[:,:input_length,:] + # For bs > 1, this is required, so cannot avoid this D2D copy + logits_length = bs * input_length * self.config.vocab_size + logits = self.hidden_states.flatten()[:logits_length].view(bs, input_length, self.config.vocab_size) if is_cpu_mode: logits = logits.cpu() @@ -414,7 +411,7 @@ def forward(self, input_ids, encoder_hidden_states, *args, **kwargs): def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, **kwargs): # In HuggingFace generation_utils.py, this function will be called at each decoding step, before running the decoder's forward(). - + if past is not None: input_ids = input_ids[:, -1:] @@ -428,13 +425,13 @@ def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, ** ret["past_key_values"] = past return ret - + def reset(self): ''' You should always call this function after a use case because T5TRTDecoder does not clear the cached encoder_hidden_states or cross_attention itself. ''' self.persist_encoder_hidden_states = False - self.encoder_hidden_states = None + self.encoder_hidden_states.zero_() if self.config.use_cache: self.persist_cross_attention_kv_cache = False @@ -487,9 +484,9 @@ def generate( if min_length is None: min_length = T5ModelTRTConfig.MIN_OUTPUT_LENGTH[self.metadata.variant] - + encoder_last_hidden_state = self.t5_trt_encoder(input_ids=input_ids).to("cuda") - + decoder_output = self.t5_trt_decoder.generate( input_ids, max_length = max_length, @@ -504,7 +501,7 @@ def generate( self.t5_trt_decoder.reset() return decoder_output - + def execute_inference( self, metadata: NetworkMetadata, @@ -526,7 +523,7 @@ def execute_inference( else: input_seq_len = benchmarking_args.input_seq_len output_seq_len = benchmarking_args.output_seq_len - + input_ids = torch.randint(0, hf_config.vocab_size, (batch_size, input_seq_len)) encoder_last_hidden_state, encoder_e2e_time = encoder_inference( @@ -599,7 +596,7 @@ def execute_inference( # Remove the padding and end tokens. 
semantic_outputs = tokenizer.decode( - decoder_output[-1, :], skip_special_tokens=True + decoder_output[0, :], skip_special_tokens=True ) if isinstance(semantic_outputs, list): @@ -618,10 +615,11 @@ def execute_calculate_perplexity( metadata: NetworkMetadata, encoder_input: str, decoder_input: str, + batch_size: int, ): tokenizer = T5Tokenizer.from_pretrained(metadata.variant) - encoder_input_ids = tokenizer([encoder_input], padding=True, return_tensors="pt").input_ids - decoder_input_ids = tokenizer([decoder_input], padding=True, return_tensors="pt").input_ids + encoder_input_ids = tokenizer([encoder_input] * batch_size, padding=True, return_tensors="pt").input_ids + decoder_input_ids = tokenizer([decoder_input] * batch_size, padding=True, return_tensors="pt").input_ids perplexity = calculate_perplexity( self.t5_trt_encoder, self.t5_trt_decoder, tokenizer, encoder_input_ids, decoder_input_ids, @@ -629,21 +627,6 @@ def execute_calculate_perplexity( ) return perplexity - def _setup_workspace(self, metadata: NetworkMetadata, working_directory: str) -> NNFolderWorkspace: - return NNFolderWorkspace( - self.frameworks_cmd.config.network_name, metadata, working_directory - ) - - def _download_models( - self, - workspace: NNFolderWorkspace, - metadata: NetworkMetadata, - ) -> Tuple[NetworkModel]: - # No fpath provided for onnx files, download them from HuggingFace repo. - return self.frameworks_cmd.generate_and_download_framework( - metadata, workspace - ).onnx - def _setup_engines( self, metadata: NetworkMetadata, @@ -703,7 +686,7 @@ def _setup_engines( # Set up the non kv engine, used for non-kv mode and kv mode generation phase (1st decoder run uses the non-kv profile to generate kv cache) dec_profiles = Profile() - + # for beam search, decoder engine's inputs are expanded `num_beams` times # optimization profiles should be changed accordingly, but onnx models can be shared across greedy/beam because the first dim (batch size) is already a dynamic value, so no change needed in export.py if not hf_config.use_cache: @@ -727,7 +710,7 @@ def _setup_engines( opt=(batch_size * num_beams, opt_input_seq_len, encoder_hidden_size), max=(batch_size * num_beams, max_input_length, encoder_hidden_size), ) - + if hf_config.use_cache: num_heads = hf_config.num_heads @@ -761,7 +744,7 @@ def _setup_engines( f"past_key_values.{i}.encoder.value", **cross_attention_profile ) - + decoder_profiles = [dec_profiles] # Convert ONNX models to TRT engines. 
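For orientation, the `_setup_engines` changes in this file all funnel into the same build pattern: one Polygraphy `Profile` per dynamic input plus a preview-feature list, handed to the demo's `as_trt_engine()` helper. A hedged sketch follows; the shapes, file paths, and `metadata` object are placeholders (constructing `metadata` requires the demo's `NetworkMetadata` setup, omitted here), and the preview flags assume the TensorRT version this patch targets.

```python
# Hedged sketch of the engine-build pattern used by _setup_engines (placeholder values).
import tensorrt as trt
from polygraphy.backend.trt import Profile

from T5.export import T5DecoderONNXFile  # ONNX->TRT converter class touched in this patch

decoder_profile = Profile().add(
    "input_ids",
    min=(1, 1),
    opt=(1, 256),
    max=(1, 512),
)

# Mirrors the updated defaults in this patch: the external-tactic-sources preview
# flag is always set, and faster dynamic shapes is appended unless disabled.
preview_features = [
    trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805,
    trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805,
]

# `metadata` is a placeholder for an NNDF NetworkMetadata instance.
t5_trt_decoder_engine = T5DecoderONNXFile("t5-decoder-with-lm-head.onnx", metadata).as_trt_engine(
    "t5-decoder-with-lm-head.onnx-bs1.engine",
    profiles=[decoder_profile],
    preview_features=preview_features,
)
```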
@@ -777,7 +760,7 @@ def _setup_engines( if num_beams > 1: engine_tag += "-beam{}".format(num_beams) - preview_features = [] + preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805] if disable_preview_dynamic_shapes: engine_tag += "-noPreviewFasterDynamicShapes" else: @@ -825,7 +808,7 @@ def _setup_engines( profiles=cross_attention_kv_generation_profiles, preview_features=preview_features ) - + self.t5_trt_decoder.set_cross_attention_kv_cache_engine(self.t5_trt_cross_attention_kv_generator) def run_trt( @@ -876,7 +859,7 @@ def run_trt( else: for ei, di in zip(network_input, perplexity_reference): ppl_results.append( - self.execute_calculate_perplexity(metadata, ei, di) + self.execute_calculate_perplexity(metadata, ei, di, batch_size) ) self.t5_trt_decoder.reset() diff --git a/demo/HuggingFace/notebooks/bart-playground.ipynb b/demo/HuggingFace/notebooks/bart-playground.ipynb index c59c5264..59e0e20f 100644 --- a/demo/HuggingFace/notebooks/bart-playground.ipynb +++ b/demo/HuggingFace/notebooks/bart-playground.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n", + "# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n", "# SPDX-License-Identifier: Apache-2.0\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", diff --git a/demo/HuggingFace/notebooks/bart.ipynb b/demo/HuggingFace/notebooks/bart.ipynb index dbd574af..5a9dd70b 100644 --- a/demo/HuggingFace/notebooks/bart.ipynb +++ b/demo/HuggingFace/notebooks/bart.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n", + "# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved.\n", "# SPDX-License-Identifier: Apache-2.0\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", @@ -673,7 +673,7 @@ "if num_beams > 1:\n", " engine_tag += \"-beam{}\".format(num_beams)\n", "\n", - "preview_features = []\n", + "preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n", "if disable_preview_dynamic_shapes:\n", " engine_tag += \"-noPreviewFasterDynamicShapes\"\n", "else:\n", diff --git a/demo/HuggingFace/notebooks/gpt2.ipynb b/demo/HuggingFace/notebooks/gpt2.ipynb index cf96475c..745b996b 100644 --- a/demo/HuggingFace/notebooks/gpt2.ipynb +++ b/demo/HuggingFace/notebooks/gpt2.ipynb @@ -509,10 +509,11 @@ "opt_length = input_id.shape[1] if use_input_length else max_length // 2 \n", "# Create different engine tags for different configurations\n", "engine_tag = f\"bs{batch_size}\"\n", - "preview_features = [PreviewFeature.FASTER_DYNAMIC_SHAPES_0805]\n", + "preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n", "if disable_preview_dynamic_shapes:\n", - " engine_tag += \"-disableFasterDynamicShapes\"\n", - " preview_features = []\n", + " engine_tag += \"-noPreviewFasterDynamicShapes\"\n", + "else:\n", + " preview_features += [PreviewFeature.FASTER_DYNAMIC_SHAPES_0805]\n", "\n", "profiles = [Profile().add(\n", " \"input_ids\",\n", diff --git a/demo/HuggingFace/notebooks/t5.ipynb b/demo/HuggingFace/notebooks/t5.ipynb index b752108e..c708e04e 100644 --- a/demo/HuggingFace/notebooks/t5.ipynb +++ b/demo/HuggingFace/notebooks/t5.ipynb @@ -351,10 +351,14 @@ "outputs": [], "source": [ "onnx_model_path = './models/{}/ONNX'.format(T5_VARIANT)\n", - "!mkdir -p $onnx_model_path\n", "\n", "metadata=NetworkMetadata(variant=T5_VARIANT, precision=Precision(fp16=True), other=T5Metadata(kv_cache=False))\n", "\n", + "encoder_onnx_model_path = os.path.join(onnx_model_path, \"encoder\")\n", + "decoder_onnx_model_path = os.path.join(onnx_model_path, \"decoder\")\n", + "!mkdir -p $encoder_onnx_model_path\n", + "!mkdir -p $decoder_onnx_model_path\n", + "\n", "encoder_onnx_model_fpath = T5_VARIANT + \"-encoder.onnx\"\n", "decoder_onnx_model_fpath = T5_VARIANT + \"-decoder-with-lm-head.onnx\"\n", "\n", @@ -362,10 +366,10 @@ "t5_decoder = T5DecoderTorchFile(t5_model.to('cpu'), metadata)\n", "\n", "onnx_t5_encoder = t5_encoder.as_onnx_model(\n", - " os.path.join(onnx_model_path, encoder_onnx_model_fpath), force_overwrite=False\n", + " os.path.join(encoder_onnx_model_path, encoder_onnx_model_fpath), force_overwrite=False\n", ")\n", "onnx_t5_decoder = t5_decoder.as_onnx_model(\n", - " os.path.join(onnx_model_path, decoder_onnx_model_fpath), force_overwrite=False\n", + " os.path.join(decoder_onnx_model_path, decoder_onnx_model_fpath), force_overwrite=False\n", ")" ] }, @@ -444,7 +448,7 @@ "if num_beams > 1:\n", " engine_tag += \"-beam{}\".format(num_beams)\n", "\n", - "preview_features = []\n", + "preview_features = [PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n", "if disable_preview_dynamic_shapes:\n", " engine_tag += \"-noFasterDynamicShapes\"\n", "else:\n", @@ -454,7 +458,7 @@ "decoder_engine_name = os.path.join(tensorrt_model_path, decoder_onnx_model_fpath) + f\"-{engine_tag}.engine\"\n", "\n", "if not os.path.exists(encoder_engine_name):\n", - " t5_trt_encoder_engine = T5EncoderONNXFile(os.path.join(onnx_model_path, encoder_onnx_model_fpath), metadata).as_trt_engine(\n", + " t5_trt_encoder_engine = T5EncoderONNXFile(os.path.join(encoder_onnx_model_path, encoder_onnx_model_fpath), 
metadata).as_trt_engine(\n", " encoder_engine_name,\n", " profiles=[encoder_profile],\n", " preview_features=preview_features)\n", @@ -462,7 +466,7 @@ " t5_trt_encoder_engine = T5EncoderTRTEngine(encoder_engine_name, metadata)\n", "\n", "if not os.path.exists(decoder_engine_name):\n", - " t5_trt_decoder_engine = T5DecoderONNXFile(os.path.join(onnx_model_path, decoder_onnx_model_fpath), metadata).as_trt_engine(\n", + " t5_trt_decoder_engine = T5DecoderONNXFile(os.path.join(decoder_onnx_model_path, decoder_onnx_model_fpath), metadata).as_trt_engine(\n", " decoder_engine_name,\n", " profiles=[decoder_profile],\n", " preview_features=preview_features)\n", @@ -621,6 +625,14 @@ "\n", "If you are interested in further details of the conversion process, check out [T5/trt.py](../T5/trt.py)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6a8b7c8", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -639,7 +651,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.6" }, "vscode": { "interpreter": { diff --git a/demo/HuggingFace/requirements.txt b/demo/HuggingFace/requirements.txt index db19f166..30d9cdb1 100644 --- a/demo/HuggingFace/requirements.txt +++ b/demo/HuggingFace/requirements.txt @@ -18,12 +18,13 @@ huggingface-hub==0.11.0; python_version>="3.7" huggingface-hub==0.4.0; python_version<"3.7" transformers==4.20.0; python_version>="3.7" transformers==4.18.0; python_version<"3.7" -torch <= 1.11 +torch==1.13.1; python_version>="3.7" +torch==1.10; python_version<"3.7" sentencepiece==0.1.95; python_version<"3.10" sentencepiece==0.1.97; python_version>="3.10" --extra-index-url https://pypi.ngc.nvidia.com onnx==1.9.0; python_version<"3.8" -onnx==1.12.0; python_version>="3.8" +onnx==1.13.1; python_version>="3.8" polygraphy>=0.42.2 tabulate toml diff --git a/demo/HuggingFace/run.py b/demo/HuggingFace/run.py index d9752128..3521b57f 100644 --- a/demo/HuggingFace/run.py +++ b/demo/HuggingFace/run.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,6 +45,7 @@ # NNDF from NNDF.general_utils import process_per_result_entries, process_results, register_network_folders, RANDOM_SEED from NNDF.logger import G_LOGGER +from NNDF.cuda_bootstrapper import bootstrap_ld_library_path # huggingface from transformers import set_seed @@ -298,6 +299,12 @@ def main() -> None: # Delegate parser to action specifics action = get_action(known_args.action, networks, parser) known_args, _ = parser.parse_known_args() + + # If bootstrap occurs, then the spawned process completes the rest of demo. + # We can exit safely. We spawn after parsing basic args to reduce loading churn on rudimentary help commands. + if bootstrap_ld_library_path(): + sys.exit(0) + return action.execute(known_args) diff --git a/demo/HuggingFace/tests/test_interface.py b/demo/HuggingFace/tests/test_interface.py index ebf1cca7..9dda902f 100644 --- a/demo/HuggingFace/tests/test_interface.py +++ b/demo/HuggingFace/tests/test_interface.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/common/audio_processing.py b/demo/Tacotron2/common/audio_processing.py index ac7f4b12..090581d5 100644 --- a/demo/Tacotron2/common/audio_processing.py +++ b/demo/Tacotron2/common/audio_processing.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/common/layers.py b/demo/Tacotron2/common/layers.py index 2996e849..cbeb4910 100644 --- a/demo/Tacotron2/common/layers.py +++ b/demo/Tacotron2/common/layers.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/common/stft.py b/demo/Tacotron2/common/stft.py index 77d33ade..59700e99 100644 --- a/demo/Tacotron2/common/stft.py +++ b/demo/Tacotron2/common/stft.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/common/utils.py b/demo/Tacotron2/common/utils.py index a874259f..6cccbf22 100644 --- a/demo/Tacotron2/common/utils.py +++ b/demo/Tacotron2/common/utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/data_functions.py b/demo/Tacotron2/data_functions.py index 8e2c0011..623e5af6 100644 --- a/demo/Tacotron2/data_functions.py +++ b/demo/Tacotron2/data_functions.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/inference.py b/demo/Tacotron2/inference.py index 8aa7d0ec..77bbccc1 100644 --- a/demo/Tacotron2/inference.py +++ b/demo/Tacotron2/inference.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/inference_perf.py b/demo/Tacotron2/inference_perf.py index 3d07a0bf..cb13463e 100644 --- a/demo/Tacotron2/inference_perf.py +++ b/demo/Tacotron2/inference_perf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/loss_functions.py b/demo/Tacotron2/loss_functions.py index 1cae61fc..7ee1a5b2 100644 --- a/demo/Tacotron2/loss_functions.py +++ b/demo/Tacotron2/loss_functions.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/main.py b/demo/Tacotron2/main.py index ac309808..2fee8563 100644 --- a/demo/Tacotron2/main.py +++ b/demo/Tacotron2/main.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/models.py b/demo/Tacotron2/models.py index c3d91ab5..fad8af46 100644 --- a/demo/Tacotron2/models.py +++ b/demo/Tacotron2/models.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/multiproc.py b/demo/Tacotron2/multiproc.py index 4bfd3bc6..d3eb63ad 100644 --- a/demo/Tacotron2/multiproc.py +++ b/demo/Tacotron2/multiproc.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/preprocess_audio2mel.py b/demo/Tacotron2/preprocess_audio2mel.py index e57709ec..32026325 100644 --- a/demo/Tacotron2/preprocess_audio2mel.py +++ b/demo/Tacotron2/preprocess_audio2mel.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/arg_parser.py b/demo/Tacotron2/tacotron2/arg_parser.py index e0044088..2a450ef6 100644 --- a/demo/Tacotron2/tacotron2/arg_parser.py +++ b/demo/Tacotron2/tacotron2/arg_parser.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/data_function.py b/demo/Tacotron2/tacotron2/data_function.py index f89e8793..5d2c0064 100644 --- a/demo/Tacotron2/tacotron2/data_function.py +++ b/demo/Tacotron2/tacotron2/data_function.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/loss_function.py b/demo/Tacotron2/tacotron2/loss_function.py index 2ef0f15d..07b3610e 100644 --- a/demo/Tacotron2/tacotron2/loss_function.py +++ b/demo/Tacotron2/tacotron2/loss_function.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/model.py b/demo/Tacotron2/tacotron2/model.py index e90bbd4e..c8ba9f96 100644 --- a/demo/Tacotron2/tacotron2/model.py +++ b/demo/Tacotron2/tacotron2/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/text/cleaners.py b/demo/Tacotron2/tacotron2/text/cleaners.py index f34a56fc..4cbcb015 100644 --- a/demo/Tacotron2/tacotron2/text/cleaners.py +++ b/demo/Tacotron2/tacotron2/text/cleaners.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/text/cmudict.py b/demo/Tacotron2/tacotron2/text/cmudict.py index a18ccf6a..b359b235 100644 --- a/demo/Tacotron2/tacotron2/text/cmudict.py +++ b/demo/Tacotron2/tacotron2/text/cmudict.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/text/numbers.py b/demo/Tacotron2/tacotron2/text/numbers.py index 365b9e64..43df588d 100644 --- a/demo/Tacotron2/tacotron2/text/numbers.py +++ b/demo/Tacotron2/tacotron2/text/numbers.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tacotron2/text/symbols.py b/demo/Tacotron2/tacotron2/text/symbols.py index 090ef204..604626ec 100644 --- a/demo/Tacotron2/tacotron2/text/symbols.py +++ b/demo/Tacotron2/tacotron2/text/symbols.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tensorrt/convert_onnx2trt.py b/demo/Tacotron2/tensorrt/convert_onnx2trt.py index 64f83a68..ec43cb05 100644 --- a/demo/Tacotron2/tensorrt/convert_onnx2trt.py +++ b/demo/Tacotron2/tensorrt/convert_onnx2trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -64,7 +64,6 @@ def parse_args(parser): required=False) parser.add_argument("--disable-preview-dynamic-shapes", action="store_true", help="Disable dynamic shape preview feature.") parser.set_defaults(loop=int(trt.__version__[0]) >= 8) - return parser diff --git a/demo/Tacotron2/tensorrt/convert_tacotron22onnx.py b/demo/Tacotron2/tensorrt/convert_tacotron22onnx.py index 09420e1c..361a2221 100644 --- a/demo/Tacotron2/tensorrt/convert_tacotron22onnx.py +++ b/demo/Tacotron2/tensorrt/convert_tacotron22onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tensorrt/convert_waveglow2onnx.py b/demo/Tacotron2/tensorrt/convert_waveglow2onnx.py index 8894c2f0..4b9aecbc 100644 --- a/demo/Tacotron2/tensorrt/convert_waveglow2onnx.py +++ b/demo/Tacotron2/tensorrt/convert_waveglow2onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/tensorrt/generate_decoder.py b/demo/Tacotron2/tensorrt/generate_decoder.py index ec7c2c34..62f8b04e 100644 --- a/demo/Tacotron2/tensorrt/generate_decoder.py +++ b/demo/Tacotron2/tensorrt/generate_decoder.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -209,4 +209,4 @@ def insert_decoder_loop(decoder_iter_onnx_path, output_dir, decoder_out_name, fp if args.decoder_out == None: args.decoder_out = "decoder_with_outer_loop_{}.onnx".format("fp16" if args.fp16 else "fp32") - insert_decoder_loop(args.model_path, args.output_dir, args.decoder_out, args.fp16) \ No newline at end of file + insert_decoder_loop(args.model_path, args.output_dir, args.decoder_out, args.fp16) diff --git a/demo/Tacotron2/tensorrt/inference_trt.py b/demo/Tacotron2/tensorrt/inference_trt.py index 87fd7324..4f5f76d3 100644 --- a/demo/Tacotron2/tensorrt/inference_trt.py +++ b/demo/Tacotron2/tensorrt/inference_trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -244,7 +244,7 @@ def infer_tacotron2_trt(encoder, decoder_iter, postnet, mel_outputs = torch.tensor(result[0], device=device) mel_lengths = torch.tensor(result[1], device=device) - else: + else: print("Running Tacotron2 Decoder with loop") decoder_tensors = { "inputs" : @@ -449,10 +449,10 @@ def main(): with encoder_context, postnet_context: pass - + if decoder_context is not None: with decoder_context: pass - + if waveglow_context is not None: with waveglow_context: pass diff --git a/demo/Tacotron2/tensorrt/test_infer_trt.py b/demo/Tacotron2/tensorrt/test_infer_trt.py index 36dbc6f9..7023f02f 100644 --- a/demo/Tacotron2/tensorrt/test_infer_trt.py +++ b/demo/Tacotron2/tensorrt/test_infer_trt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -194,7 +194,7 @@ def main(): with MeasureTime(measurements, "waveglow_latency"): audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16) - + num_mels = mel.size(0)*mel.size(2) num_samples = audios.size(0)*audios.size(1) diff --git a/demo/Tacotron2/tensorrt/trt_utils.py b/demo/Tacotron2/tensorrt/trt_utils.py index 06b072fe..3e1d534a 100644 --- a/demo/Tacotron2/tensorrt/trt_utils.py +++ b/demo/Tacotron2/tensorrt/trt_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ def parse_dynamic_size(dim): split = str(dim).split(',') assert len(split) in (1,3) , "Dynamic size input must be either 1 or 3 comma-separated integers" ints = [int(i) for i in split] - + if len(ints) == 1: ints *= 3 diff --git a/demo/Tacotron2/test_infer.py b/demo/Tacotron2/test_infer.py index 5c5217db..81254d37 100644 --- a/demo/Tacotron2/test_infer.py +++ b/demo/Tacotron2/test_infer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/train.py b/demo/Tacotron2/train.py index c3cee862..55a9e56f 100644 --- a/demo/Tacotron2/train.py +++ b/demo/Tacotron2/train.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/waveglow/arg_parser.py b/demo/Tacotron2/waveglow/arg_parser.py index ef18272b..7002bf6d 100644 --- a/demo/Tacotron2/waveglow/arg_parser.py +++ b/demo/Tacotron2/waveglow/arg_parser.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/waveglow/data_function.py b/demo/Tacotron2/waveglow/data_function.py index f41a31c0..62076eba 100644 --- a/demo/Tacotron2/waveglow/data_function.py +++ b/demo/Tacotron2/waveglow/data_function.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/waveglow/denoiser.py b/demo/Tacotron2/waveglow/denoiser.py index dc798ee1..5dc2d789 100644 --- a/demo/Tacotron2/waveglow/denoiser.py +++ b/demo/Tacotron2/waveglow/denoiser.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/waveglow/loss_function.py b/demo/Tacotron2/waveglow/loss_function.py index f3f5ce1c..75620df9 100644 --- a/demo/Tacotron2/waveglow/loss_function.py +++ b/demo/Tacotron2/waveglow/loss_function.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/demo/Tacotron2/waveglow/model.py b/demo/Tacotron2/waveglow/model.py index eee0145c..00a26421 100644 --- a/demo/Tacotron2/waveglow/model.py +++ b/demo/Tacotron2/waveglow/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ import torch from torch.autograd import Variable import torch.nn.functional as F +import numpy as np @torch.jit.script @@ -293,7 +294,13 @@ def infer_onnx(self, spect, z, sigma=0.9): audio = z[:, :self.n_remaining_channels, :, :] z = z[:, self.n_remaining_channels:self.n_group, :, :] - audio = sigma*audio + + # Convert sigma to a torch tensor to ensure constant is exported properly + if audio.type() == 'torch.cuda.HalfTensor' or audio.type() == 'torch.HalfTensor': + sigma = torch.tensor(np.float16(sigma)) + else: + sigma = torch.tensor(np.float32(sigma)) + audio = sigma * audio for k in reversed(range(self.n_flows)): n_half = int(audio.size(1) // 2) diff --git a/docker/build.sh b/docker/build.sh index 39d37a32..6b28fd09 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,6 @@ arg_dockerfile=docker/ubuntu-20.04.Dockerfile arg_imagename=tensorrt-ubuntu -arg_cudaversion=12.0.1 arg_help=0 while [[ "$#" -gt 0 ]]; do case $1 in @@ -38,7 +37,13 @@ if [ "$arg_help" -eq "1" ]; then exit; fi -docker_args="-f $arg_dockerfile --build-arg CUDA_VERSION=$arg_cudaversion --build-arg uid=$(id -u) --build-arg gid=$(id -g) --tag=$arg_imagename ." +if [ -z "$arg_cudaversion" ] +then + echo "--cuda not specified, so not passing in --build-arg CUDA_VERSION to Dockerfile" + docker_args="-f $arg_dockerfile --build-arg uid=$(id -u) --build-arg gid=$(id -g) --tag=$arg_imagename ." +else + docker_args="-f $arg_dockerfile --build-arg CUDA_VERSION=$arg_cudaversion --build-arg uid=$(id -u) --build-arg gid=$(id -g) --tag=$arg_imagename ." +fi echo "Building container:" echo "> docker build $docker_args" diff --git a/docker/centos-7.Dockerfile b/docker/centos-7.Dockerfile index 164b1533..ff27d6d2 100644 --- a/docker/centos-7.Dockerfile +++ b/docker/centos-7.Dockerfile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,11 @@ # ARG CUDA_VERSION=12.0.1 -ARG OS_VERSION=7 -FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-centos${OS_VERSION} +FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-centos7 LABEL maintainer="NVIDIA CORPORATION" -ENV TRT_VERSION 8.6.0.12 +ENV TRT_VERSION 8.6.1.6 SHELL ["/bin/bash", "-c"] # Setup user account @@ -48,19 +47,26 @@ RUN yum -y install \ # Install python3 RUN yum install -y python36 python3-devel +# yum needs to use python2 +RUN sed -i "1s/python/python2/" /usr/bin/yum + # Install TensorRT RUN if [ "${CUDA_VERSION}" = "10.2" ] ; then \ v="${TRT_VERSION%.*}-1.cuda${CUDA_VERSION}" &&\ yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo &&\ yum -y install libnvinfer8-${v} libnvparsers8-${v} libnvonnxparsers8-${v} libnvinfer-plugin8-${v} \ libnvinfer-devel-${v} libnvparsers-devel-${v} libnvonnxparsers-devel-${v} libnvinfer-plugin-devel-${v} \ - python3-libnvinfer-${v}; \ + python3-libnvinfer-=${v} libnvinfer-dispatch8-=${v} libnvinfer-dispatch-devel-=${v} libnvinfer-lean8-=${v} \ + libnvinfer-lean-devel-=${v} libnvinfer-vc-plugin8-=${v} libnvinfer-vc-plugin-devel-=${v} \ + libnvinfer-headers-devel-=${v} libnvinfer-headers-plugin-devel-=${v}; \ else \ v="${TRT_VERSION}-1.cuda${CUDA_VERSION%.*}" &&\ yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo &&\ yum -y install libnvinfer8-${v} libnvparsers8-${v} libnvonnxparsers8-${v} libnvinfer-plugin8-${v} \ libnvinfer-devel-${v} libnvparsers-devel-${v} libnvonnxparsers-devel-${v} libnvinfer-plugin-devel-${v} \ - python3-libnvinfer-${v}; \ + python3-libnvinfer-=${v} libnvinfer-dispatch8-=${v} libnvinfer-dispatch-devel-=${v} libnvinfer-lean8-=${v} \ + libnvinfer-lean-devel-=${v} libnvinfer-vc-plugin8-=${v} libnvinfer-vc-plugin-devel-=${v} \ + libnvinfer-headers-devel-=${v} libnvinfer-headers-plugin-devel-=${v}; \ fi # Install dev-toolset-8 for g++ version that supports c++14 diff --git a/docker/launch.sh b/docker/launch.sh index 13763d24..2fe9d299 100755 --- a/docker/launch.sh +++ b/docker/launch.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/ubuntu-18.04.Dockerfile b/docker/ubuntu-18.04.Dockerfile index cf42b792..8c246126 100644 --- a/docker/ubuntu-18.04.Dockerfile +++ b/docker/ubuntu-18.04.Dockerfile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,11 @@ # ARG CUDA_VERSION=12.0.1 -ARG OS_VERSION=18.04 -FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-ubuntu${OS_VERSION} +FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-ubuntu18.04 LABEL maintainer="NVIDIA CORPORATION" -ENV TRT_VERSION 8.6.0.12 +ENV TRT_VERSION 8.6.1.6 SHELL ["/bin/bash", "-c"] # Setup user account @@ -70,14 +69,18 @@ RUN if [ "${CUDA_VERSION}" = "10.2" ] ; then \ apt-get update &&\ sudo apt-get install libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v}; \ + python3-libnvinfer=${v} libnvinfer-dispatch8=${v} libnvinfer-dispatch-dev=${v} libnvinfer-lean8=${v} \ + libnvinfer-lean-dev=${v} libnvinfer-vc-plugin8=${v} libnvinfer-vc-plugin-dev=${v} \ + libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v}; \ else \ v="${TRT_VERSION}-1+cuda${CUDA_VERSION%.*}" &&\ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub &&\ apt-get update &&\ sudo apt-get -y install libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v}; \ + python3-libnvinfer=${v} libnvinfer-dispatch8=${v} libnvinfer-dispatch-dev=${v} libnvinfer-lean8=${v} \ + libnvinfer-lean-dev=${v} libnvinfer-vc-plugin8=${v} libnvinfer-vc-plugin-dev=${v} \ + libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v}; \ fi # Install PyPI packages diff --git a/docker/ubuntu-20.04-aarch64.Dockerfile b/docker/ubuntu-20.04-aarch64.Dockerfile index c0cda07e..540943cd 100644 --- a/docker/ubuntu-20.04-aarch64.Dockerfile +++ b/docker/ubuntu-20.04-aarch64.Dockerfile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,10 +15,12 @@ # limitations under the License. # +ARG CUDA_VERSION=12.0.1 + # Multi-arch container support available in non-cudnn containers. -FROM nvidia/cuda:12.0.1-devel-ubuntu20.04 +FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 -ENV TRT_VERSION 8.6.0.12 +ENV TRT_VERSION 8.6.1.6 SHELL ["/bin/bash", "-c"] # Setup user account @@ -72,7 +74,9 @@ RUN v="${TRT_VERSION}-1+cuda${CUDA_VERSION%.*}" &&\ apt-get update &&\ sudo apt-get -y install libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v}; + python3-libnvinfer=${v} libnvinfer-dispatch8=${v} libnvinfer-dispatch-dev=${v} libnvinfer-lean8=${v} \ + libnvinfer-lean-dev=${v} libnvinfer-vc-plugin8=${v} libnvinfer-vc-plugin-dev=${v} \ + libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v}; # Install Cmake RUN cd /tmp && \ diff --git a/docker/ubuntu-20.04.Dockerfile b/docker/ubuntu-20.04.Dockerfile index b4143713..65605b47 100644 --- a/docker/ubuntu-20.04.Dockerfile +++ b/docker/ubuntu-20.04.Dockerfile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,11 @@ # ARG CUDA_VERSION=12.0.1 -ARG OS_VERSION=20.04 -FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-ubuntu${OS_VERSION} +FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-ubuntu20.04 LABEL maintainer="NVIDIA CORPORATION" -ENV TRT_VERSION 8.6.0.12 +ENV TRT_VERSION 8.6.1.6 SHELL ["/bin/bash", "-c"] # Setup user account @@ -76,14 +75,18 @@ RUN if [ "${CUDA_VERSION}" = "10.2" ] ; then \ apt-get update &&\ sudo apt-get install libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v}; \ + python3-libnvinfer=${v} libnvinfer-dispatch8=${v} libnvinfer-dispatch-dev=${v} libnvinfer-lean8=${v} \ + libnvinfer-lean-dev=${v} libnvinfer-vc-plugin8=${v} libnvinfer-vc-plugin-dev=${v} \ + libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v}; \ else \ v="${TRT_VERSION}-1+cuda${CUDA_VERSION%.*}" &&\ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub &&\ apt-get update &&\ sudo apt-get -y install libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v}; \ + python3-libnvinfer=${v} libnvinfer-dispatch8=${v} libnvinfer-dispatch-dev=${v} libnvinfer-lean8=${v} \ + libnvinfer-lean-dev=${v} libnvinfer-vc-plugin8=${v} libnvinfer-vc-plugin-dev=${v} \ + libnvinfer-headers-dev=${v} libnvinfer-headers-plugin-dev=${v}; \ fi # Install PyPI packages diff --git a/docker/ubuntu-cross-aarch64.Dockerfile b/docker/ubuntu-cross-aarch64.Dockerfile index 0ff1fbc7..cf5f31d9 100644 --- a/docker/ubuntu-cross-aarch64.Dockerfile +++ b/docker/ubuntu-cross-aarch64.Dockerfile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,12 @@ # ARG CUDA_VERSION=11.4.1 -ARG OS_VERSION=20.04 -FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${OS_VERSION} +# Multi-arch container support available in non-cudnn containers. 
+FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 LABEL maintainer="NVIDIA CORPORATION" -ENV TRT_VERSION 8.6.0.12 +ENV TRT_VERSION 8.5.2 ENV DEBIAN_FRONTEND=noninteractive ARG uid=1000 @@ -75,14 +75,16 @@ RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/ # Install CUDA cross compile toolchain RUN dpkg -i /pdk_files/cuda-repo-cross-aarch64*.deb /pdk_files/cuda-repo-ubuntu*_amd64.deb \ + && cp /var/cuda-repo-cross*/cuda-*-keyring.gpg /usr/share/keyrings/ \ + && cp /var/cuda-repo-ubuntu*/cuda-*-keyring.gpg /usr/share/keyrings/ \ && apt-get update \ && apt-get install -y cuda-cross-aarch64 \ && rm -rf /var/lib/apt/lists/* # Unpack cudnn -RUN dpkg -x /pdk_files/cudnn-local-repo*.deb /pdk_files/cudnn_extract \ - && dpkg -x /pdk_files/cudnn_extract/var/cudnn-local-repo*/libcudnn[7-8]_*-1+cuda11.[0-9]_arm64.deb /pdk_files/cudnn \ - && dpkg -x /pdk_files/cudnn_extract/var/cudnn-local-repo*/libcudnn[7-8]-dev_*-1+cuda11.[0-9]_arm64.deb /pdk_files/cudnn \ +RUN dpkg -x /pdk_files/cudnn-local-tegra-repo*.deb /pdk_files/cudnn_extract \ + && dpkg -x /pdk_files/cudnn_extract/var/cudnn-local-tegra-repo*/libcudnn[7-8]_*-1+cuda11.[0-9]_arm64.deb /pdk_files/cudnn \ + && dpkg -x /pdk_files/cudnn_extract/var/cudnn-local-tegra-repo*/libcudnn[7-8]-dev_*-1+cuda11.[0-9]_arm64.deb /pdk_files/cudnn \ && cd /pdk_files/cudnn/usr/lib/aarch64-linux-gnu \ && cd /pdk_files/cudnn \ && ln -s usr/include/aarch64-linux-gnu include \ @@ -98,6 +100,8 @@ RUN dpkg -x /pdk_files/cudnn-local-repo*.deb /pdk_files/cudnn_extract \ && ln -s /pdk_files/cudnn/usr/include/aarch64-linux-gnu/cudnn_version_v[7-9].h /usr/include/cudnn_version.h # Unpack libnvinfer +RUN dpkg -x /pdk_files/nv-tensorrt-local-repo-l4t-[0-8].[0-9].[0-9]-cuda-11.[0-9]_*_arm64.deb /pdk_files/tensorrt +RUN mv /pdk_files/tensorrt/var/nv-tensorrt-local-repo-l4t-[0-8].[0-9].[0-9]-cuda-11.[0-9]/*.deb /pdk_files RUN dpkg -x /pdk_files/libnvinfer[0-8]_*-1+cuda11.[0-9]_arm64.deb /pdk_files/tensorrt \ && dpkg -x /pdk_files/libnvinfer-dev_*-1+cuda11.[0-9]_arm64.deb /pdk_files/tensorrt \ && dpkg -x /pdk_files/libnvparsers[6-8]_*-1+cuda11.[0-9]_arm64.deb /pdk_files/tensorrt \ diff --git a/include/NvInfer.h b/include/NvInfer.h index 4a1ee65d..064bc93b 100644 --- a/include/NvInfer.h +++ b/include/NvInfer.h @@ -8971,8 +8971,8 @@ enum class MemoryPoolType : int32_t kDLA_GLOBAL_DRAM = 3, //! - //! kTACTIC_DRAM is the host DRAM used by the optimizer to - //! run tactics. On embedded devices, where host and device memory are unified, this includes all device + //! kTACTIC_DRAM is the device DRAM used by the optimizer to + //! run tactics. On embedded devices, where host and device memory are unified, this includes all host //! memory required by TensorRT to build the network up to the point of each memory allocation. //! This defaults to 75% of totalGlobalMem as reported by cudaGetDeviceProperties when //! cudaGetDeviceProperties.embedded is true, and 100% otherwise. 
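[Editor's note, not part of the patch] The NvInfer.h hunk above only corrects the kTACTIC_DRAM documentation (it is device DRAM used while the optimizer times tactics, defaulting to 75% of totalGlobalMem on embedded devices and 100% otherwise). As a minimal C++ sketch of how a build configuration might cap that pool, assuming a builder config created elsewhere; the 2 GiB figure and the capTacticDram helper name are illustrative assumptions, not part of this patch:

    #include <cstddef>
    #include "NvInfer.h"

    // Cap the DRAM pool that the optimizer may use while timing tactics.
    void capTacticDram(nvinfer1::IBuilderConfig& config)
    {
        constexpr std::size_t kTwoGiB = 2ULL << 30; // arbitrary example value
        config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kTACTIC_DRAM, kTwoGiB);

        // Read back the effective limit; without an explicit call this defaults
        // as described in the NvInfer.h comment above.
        std::size_t limit = config.getMemoryPoolLimit(nvinfer1::MemoryPoolType::kTACTIC_DRAM);
        (void)limit;
    }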
diff --git a/include/NvInferImpl.h b/include/NvInferImpl.h index 522163cb..ddc6a89a 100644 --- a/include/NvInferImpl.h +++ b/include/NvInferImpl.h @@ -306,6 +306,12 @@ class VCudaEngine : public VRoot virtual HardwareCompatibilityLevel getHardwareCompatibilityLevel() const noexcept = 0; virtual ICudaEngine* getPImpl() noexcept = 0; virtual int32_t getNbAuxStreams() const noexcept = 0; + + virtual int32_t getTensorBytesPerComponentV2(char const* tensorName, int32_t profileIndex) const noexcept = 0; + virtual int32_t getTensorComponentsPerElementV2(char const* tensorName, int32_t profileIndex) const noexcept = 0; + virtual TensorFormat getTensorFormatV2(char const* tensorName, int32_t profileIndex) const noexcept = 0; + virtual char const* getTensorFormatDescV2(char const* tensorName, int32_t profileIndex) const noexcept = 0; + virtual int32_t getTensorVectorizedDimV2(char const* tensorName, int32_t profileIndex) const noexcept = 0; }; class VExecutionContext : public VRoot diff --git a/include/NvInferRuntime.h b/include/NvInferRuntime.h index fffbdeef..3850ab93 100644 --- a/include/NvInferRuntime.h +++ b/include/NvInferRuntime.h @@ -388,7 +388,7 @@ class IPluginV2DynamicExt : public nvinfer1::IPluginV2Ext //! \brief Return true if plugin supports the format and datatype for the input/output indexed by pos. //! //! For this method inputs are numbered 0..(nbInputs-1) and outputs are numbered nbInputs..(nbInputs+nbOutputs-1). - //! Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs-1. + //! Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs. //! //! TensorRT invokes this method to ask if the input/output indexed by pos supports the format/datatype specified //! by inOut[pos].format and inOut[pos].type. The override should return true if that format/datatype at inOut[pos] @@ -406,8 +406,8 @@ class IPluginV2DynamicExt : public nvinfer1::IPluginV2Ext //! * A definition for a plugin that supports only FP16 NCHW for its two inputs, //! and FP32 NCHW for its single output: //! - //! return inOut.format[pos] == TensorFormat::kLINEAR && (inOut.type[pos] == pos < 2 ? DataType::kHALF : - //! DataType::kFLOAT); + //! return inOut.format[pos] == TensorFormat::kLINEAR && (inOut.type[pos] == (pos < 2 ? DataType::kHALF : + //! DataType::kFLOAT)); //! //! * A definition for a "polymorphic" plugin with two inputs and one output that supports //! any format or type, but the inputs and output must have the same format and type: @@ -1896,14 +1896,36 @@ class ICudaEngine : public INoCopy //! \param tensorName The name of an input or output tensor. //! //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! \warning The function can only return the result of profile 0, and issues a warning message when there are + //! multiple profiles in the engine, use getTensorBytesPerComponent with profileIndex when there are multiple + //! profiles. //! //! \see getTensorVectorizedDim() + //! \see getTensorBytesPerComponent(tensorName, profileIndex) //! int32_t getTensorBytesPerComponent(char const* tensorName) const noexcept { return mImpl->getTensorBytesPerComponent(tensorName); } + //! + //! \brief Return the number of bytes per component of an element of given profile, or -1 if the provided name does + //! not map to an input or output tensor. + //! + //! The vector component size is returned if getTensorVectorizedDim(tensorName, profileIndex) != -1. + //! + //! 
\param tensorName The name of an input or output tensor. + //! \param profileIndex The profile index to query + //! + //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! + //! \see getTensorVectorizedDim(tensorName, profileIndex) + //! + int32_t getTensorBytesPerComponent(char const* tensorName, int32_t profileIndex) const noexcept + { + return mImpl->getTensorBytesPerComponentV2(tensorName, profileIndex); + } + //! //! \brief Return the number of components included in one element. //! @@ -1929,14 +1951,36 @@ class ICudaEngine : public INoCopy //! \param tensorName The name of an input or output tensor. //! //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! \warning The function can only return the result of profile 0, and issues a warning message when there + //! are multiple profiles in the engine, use getTensorComponentsPerElement with profileIndex when there are + //! multiple profiles. //! //! \see getTensorVectorizedDim() + //! \see getTensorComponentsPerElement(tensorName, profileIndex) //! int32_t getTensorComponentsPerElement(char const* tensorName) const noexcept { return mImpl->getTensorComponentsPerElement(tensorName); } + //! + //! \brief Return the number of components included in one element of given profile, or -1 if the provided name does + //! not map to an input or output tensor. + //! + //! The number of elements in the vectors is returned if getTensorVectorizedDim(tensorName, profileIndex) != -1. + //! + //! \param tensorName The name of an input or output tensor. + //! \param profileIndex The profile index to query + //! + //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! + //! \see getTensorVectorizedDim(tensorName, profileIndex) + //! + int32_t getTensorComponentsPerElement(char const* tensorName, int32_t profileIndex) const noexcept + { + return mImpl->getTensorComponentsPerElementV2(tensorName, profileIndex); + } + //! //! \brief Return the binding format. //! @@ -1952,18 +1996,34 @@ class ICudaEngine : public INoCopy } //! - //! \brief Return the binding format, or TensorFormat::kLINEAR if the provided name does not map to an input or + //! \brief Return the tensor format, or TensorFormat::kLINEAR if the provided name does not map to an input or //! output tensor. //! - //! \param tensorName The name of an input or output tensor. - //! //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! \warning This API can only return the tensor format of profile 0, and issues a warning message when there are + //! multiple profiles in the engine, use getTensorFormat with profileIndex when there are multiple profiles. + //! + //! \see getTensorFormat(tensorName, profileIndex) //! TensorFormat getTensorFormat(char const* tensorName) const noexcept { return mImpl->getTensorFormat(tensorName); } + //! + //! \brief Return the tensor format of given profile, or TensorFormat::kLINEAR if the provided name does not map to + //! an input or output tensor. + //! + //! \param tensorName The name of an input or output tensor. + //! \param profileIndex The profile index to query the format for. + //! + //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! 
+ TensorFormat getTensorFormat(char const* tensorName, int32_t profileIndex) const noexcept + { + return mImpl->getTensorFormatV2(tensorName, profileIndex); + } + //! //! \brief Return the human readable description of the tensor format, or nullptr if the provided name does not //! map to an input or output tensor. @@ -2004,12 +2064,37 @@ class ICudaEngine : public INoCopy //! \param tensorName The name of an input or output tensor. //! //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! \warning The function can only return the result of profile 0, and issues a warning message when there are + //! multiple profiles in the engine, use getTensorFormatDesc with profileIndex when there are multiple profiles. //! char const* getTensorFormatDesc(char const* tensorName) const noexcept { return mImpl->getTensorFormatDesc(tensorName); } + //! + //! \brief Return the human readable description of the tensor format of given profile, or empty string if the + //! provided name does not map to an input or output tensor. + //! + //! The description includes the order, vectorization, data type, and strides. + //! Examples are shown as follows: + //! Example 1: kCHW + FP32 + //! "Row major linear FP32 format" + //! Example 2: kCHW2 + FP16 + //! "Two wide channel vectorized row major FP16 format" + //! Example 3: kHWC8 + FP16 + Line Stride = 32 + //! "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0" + //! + //! \param tensorName The name of an input or output tensor. + //! \param profileIndex The profile index to query the format for. + //! + //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! + char const* getTensorFormatDesc(char const* tensorName, int32_t profileIndex) const noexcept + { + return mImpl->getTensorFormatDescV2(tensorName, profileIndex); + } + //! //! \brief Return the dimension index that the buffer is vectorized, or -1 is the name is not found. //! @@ -2035,12 +2120,30 @@ class ICudaEngine : public INoCopy //! \param tensorName The name of an input or output tensor. //! //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! \warning The function can only return the result of profile 0, and issues a warning message when there are + //! multiple profiles in the engine, use getTensorVectorizedDim with profileIndex when there are multiple profiles. //! int32_t getTensorVectorizedDim(char const* tensorName) const noexcept { return mImpl->getTensorVectorizedDim(tensorName); } + //! + //! \brief Return the dimension index that the buffer is vectorized of given profile, or -1 if the provided name + //! does not map to an input or output tensor. + //! + //! Specifically -1 is returned if scalars per vector is 1. + //! + //! \param tensorName The name of an input. + //! \param profileIndex The profile index to query the format for. + //! + //! \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + //! + int32_t getTensorVectorizedDim(char const* tensorName, int32_t profileIndex) const noexcept + { + return mImpl->getTensorVectorizedDimV2(tensorName, profileIndex); + } + //! //! \brief Returns the name of the network associated with the engine. //! 
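[Editor's note, not part of the patch] The NvInferRuntime.h hunk above adds profile-indexed overloads of the ICudaEngine format queries (getTensorBytesPerComponent, getTensorComponentsPerElement, getTensorFormat, getTensorFormatDesc, getTensorVectorizedDim), which avoid the "profile 0 only" warning documented for the single-argument versions. A minimal usage sketch, assuming an already-deserialized engine; the printEngineFormats helper name and printf formatting are illustrative only:

    #include <cstdio>
    #include "NvInferRuntime.h"

    // Print the per-profile format description of every I/O tensor in an engine.
    void printEngineFormats(nvinfer1::ICudaEngine const& engine)
    {
        for (int32_t p = 0; p < engine.getNbOptimizationProfiles(); ++p)
        {
            for (int32_t i = 0; i < engine.getNbIOTensors(); ++i)
            {
                char const* name = engine.getIOTensorName(i);
                // Query with an explicit profile index instead of relying on profile 0.
                std::printf("profile %d, tensor %s: %s (%d bytes/component, %d components/element)\n",
                    p, name,
                    engine.getTensorFormatDesc(name, p),
                    engine.getTensorBytesPerComponent(name, p),
                    engine.getTensorComponentsPerElement(name, p));
            }
        }
    }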
diff --git a/include/NvInferRuntimeCommon.h b/include/NvInferRuntimeCommon.h index 261cecce..10dc5640 100644 --- a/include/NvInferRuntimeCommon.h +++ b/include/NvInferRuntimeCommon.h @@ -89,43 +89,6 @@ class IPluginRegistry AsciiChar const* const pluginNamespace = "") noexcept = 0; - //! - //! \brief Return whether the parent registry will be searched if a plugin is not found in this registry - //! default: true - //! - //! \return bool variable indicating whether parent search is enabled. - //! - //! \see setParentSearchEnabled - //! - virtual bool isParentSearchEnabled() const = 0; - - //! - //! \brief Set whether the parent registry will be searched if a plugin is not found in this registry. - //! - //! \param enabled The bool variable indicating whether parent search is enabled. - //! - //! \see isParentSearchEnabled - //! - virtual void setParentSearchEnabled(bool const enabled) = 0; - - //! - //! \brief Load and register a shared library of plugins. - //! - //! \param pluginPath the plugin library path. - //! - //! \return The loaded plugin library handle. The call will fail and return - //! nullptr if any of the plugins are already registered. - //! - virtual PluginLibraryHandle loadLibrary(AsciiChar const* pluginPath) noexcept = 0; - - //! - //! \brief Deregister plugins associated with a library. Any resources acquired when the library - //! was loaded will be released. - //! - //! \param handle the plugin library handle to deregister. - //! - virtual void deregisterLibrary(PluginLibraryHandle handle) noexcept = 0; - // @cond SuppressDoxyWarnings IPluginRegistry() = default; IPluginRegistry(IPluginRegistry const&) = delete; @@ -189,6 +152,43 @@ class IPluginRegistry //! - Thread-safe: Yes //! virtual bool deregisterCreator(IPluginCreator const& creator) noexcept = 0; + + //! + //! \brief Return whether the parent registry will be searched if a plugin is not found in this registry + //! default: true + //! + //! \return bool variable indicating whether parent search is enabled. + //! + //! \see setParentSearchEnabled + //! + virtual bool isParentSearchEnabled() const = 0; + + //! + //! \brief Set whether the parent registry will be searched if a plugin is not found in this registry. + //! + //! \param enabled The bool variable indicating whether parent search is enabled. + //! + //! \see isParentSearchEnabled + //! + virtual void setParentSearchEnabled(bool const enabled) = 0; + + //! + //! \brief Load and register a shared library of plugins. + //! + //! \param pluginPath the plugin library path. + //! + //! \return The loaded plugin library handle. The call will fail and return + //! nullptr if any of the plugins are already registered. + //! + virtual PluginLibraryHandle loadLibrary(AsciiChar const* pluginPath) noexcept = 0; + + //! + //! \brief Deregister plugins associated with a library. Any resources acquired when the library + //! was loaded will be released. + //! + //! \param handle the plugin library handle to deregister. + //! + virtual void deregisterLibrary(PluginLibraryHandle handle) noexcept = 0; }; } // namespace nvinfer1 diff --git a/include/NvInferRuntimePlugin.h b/include/NvInferRuntimePlugin.h index f722d921..fbe578ff 100644 --- a/include/NvInferRuntimePlugin.h +++ b/include/NvInferRuntimePlugin.h @@ -636,7 +636,7 @@ class IPluginV2IOExt : public IPluginV2Ext //! \brief Return true if plugin supports the format and datatype for the input/output indexed by pos. //! //! 
For this method inputs are numbered 0..(nbInputs-1) and outputs are numbered nbInputs..(nbInputs+nbOutputs-1). - //! Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs-1. + //! Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs. //! //! TensorRT invokes this method to ask if the input/output indexed by pos supports the format/datatype specified //! by inOut[pos].format and inOut[pos].type. The override should return true if that format/datatype at inOut[pos] diff --git a/include/NvInferVersion.h b/include/NvInferVersion.h index cb3d790f..670b5e1b 100644 --- a/include/NvInferVersion.h +++ b/include/NvInferVersion.h @@ -20,21 +20,24 @@ #define NV_TENSORRT_MAJOR 8 //!< TensorRT major version. #define NV_TENSORRT_MINOR 6 //!< TensorRT minor version. -#define NV_TENSORRT_PATCH 0 //!< TensorRT patch version. -#define NV_TENSORRT_BUILD 12 //!< TensorRT build number. +#define NV_TENSORRT_PATCH 1 //!< TensorRT patch version. +#define NV_TENSORRT_BUILD 5 //!< TensorRT build number. #define NV_TENSORRT_LWS_MAJOR 0 //!< TensorRT LWS major version. #define NV_TENSORRT_LWS_MINOR 0 //!< TensorRT LWS minor version. #define NV_TENSORRT_LWS_PATCH 0 //!< TensorRT LWS patch version. +// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_MAJOR. #define NV_TENSORRT_SONAME_MAJOR 8 //!< Shared object library major version number. +// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_MINOR. #define NV_TENSORRT_SONAME_MINOR 6 //!< Shared object library minor version number. -#define NV_TENSORRT_SONAME_PATCH 0 //!< Shared object library patch version number. +// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_PATCH. +#define NV_TENSORRT_SONAME_PATCH 1 //!< Shared object library patch version number. #define NV_TENSORRT_RELEASE_TYPE_EARLY_ACCESS 0 //!< An early access release #define NV_TENSORRT_RELEASE_TYPE_RELEASE_CANDIDATE 1 //!< A release candidate #define NV_TENSORRT_RELEASE_TYPE_GENERAL_AVAILABILITY 2 //!< A final release -#define NV_TENSORRT_RELEASE_TYPE NV_TENSORRT_RELEASE_TYPE_EARLY_ACCESS //!< TensorRT release type +#define NV_TENSORRT_RELEASE_TYPE NV_TENSORRT_RELEASE_TYPE_GENERAL_AVAILABILITY //!< TensorRT release type #endif // NV_INFER_VERSION_H diff --git a/parsers/CMakeLists.txt b/parsers/CMakeLists.txt index 955e109d..5dab1c9f 100644 --- a/parsers/CMakeLists.txt +++ b/parsers/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/CMakeLists.txt b/parsers/caffe/CMakeLists.txt index 74fd358f..f6abda79 100644 --- a/parsers/caffe/CMakeLists.txt +++ b/parsers/caffe/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/NvCaffeParser.cpp b/parsers/caffe/NvCaffeParser.cpp index 7e1906d3..2a9737e6 100644 --- a/parsers/caffe/NvCaffeParser.cpp +++ b/parsers/caffe/NvCaffeParser.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/binaryProtoBlob.h b/parsers/caffe/binaryProtoBlob.h index d0f52d20..79ec2976 100644 --- a/parsers/caffe/binaryProtoBlob.h +++ b/parsers/caffe/binaryProtoBlob.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/blobNameToTensor.h b/parsers/caffe/blobNameToTensor.h index a4929e56..d685cced 100644 --- a/parsers/caffe/blobNameToTensor.h +++ b/parsers/caffe/blobNameToTensor.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeMacros.h b/parsers/caffe/caffeMacros.h index f403de3d..d9cca466 100644 --- a/parsers/caffe/caffeMacros.h +++ b/parsers/caffe/caffeMacros.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/caffeParser.cpp b/parsers/caffe/caffeParser/caffeParser.cpp index d447c5ca..9e8722b2 100644 --- a/parsers/caffe/caffeParser/caffeParser.cpp +++ b/parsers/caffe/caffeParser/caffeParser.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/caffeParser.h b/parsers/caffe/caffeParser/caffeParser.h index 5a24f636..bd79967b 100644 --- a/parsers/caffe/caffeParser/caffeParser.h +++ b/parsers/caffe/caffeParser/caffeParser.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/opParsers.h b/parsers/caffe/caffeParser/opParsers/opParsers.h index ee6a38c0..b4641bcd 100644 --- a/parsers/caffe/caffeParser/opParsers/opParsers.h +++ b/parsers/caffe/caffeParser/opParsers/opParsers.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseAbsVal.cpp b/parsers/caffe/caffeParser/opParsers/parseAbsVal.cpp index 5814e92f..6b9415c6 100644 --- a/parsers/caffe/caffeParser/opParsers/parseAbsVal.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseAbsVal.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseBNLL.cpp b/parsers/caffe/caffeParser/opParsers/parseBNLL.cpp index 751fa9f0..6ff13917 100644 --- a/parsers/caffe/caffeParser/opParsers/parseBNLL.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseBNLL.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseBatchNorm.cpp b/parsers/caffe/caffeParser/opParsers/parseBatchNorm.cpp index 4c4aa14c..b16552d7 100644 --- a/parsers/caffe/caffeParser/opParsers/parseBatchNorm.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseBatchNorm.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseClip.cpp b/parsers/caffe/caffeParser/opParsers/parseClip.cpp index c94f351e..108acf4f 100644 --- a/parsers/caffe/caffeParser/opParsers/parseClip.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseClip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseConcat.cpp b/parsers/caffe/caffeParser/opParsers/parseConcat.cpp index 44291c6e..a682b798 100644 --- a/parsers/caffe/caffeParser/opParsers/parseConcat.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseConcat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseConv.cpp b/parsers/caffe/caffeParser/opParsers/parseConv.cpp index 87421476..ae7372ad 100644 --- a/parsers/caffe/caffeParser/opParsers/parseConv.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseConv.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseCrop.cpp b/parsers/caffe/caffeParser/opParsers/parseCrop.cpp index 46531c30..9f907c38 100644 --- a/parsers/caffe/caffeParser/opParsers/parseCrop.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseDeconv.cpp b/parsers/caffe/caffeParser/opParsers/parseDeconv.cpp index 366425f2..92ef6816 100644 --- a/parsers/caffe/caffeParser/opParsers/parseDeconv.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseDeconv.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseELU.cpp b/parsers/caffe/caffeParser/opParsers/parseELU.cpp index 90afc517..ad67af98 100644 --- a/parsers/caffe/caffeParser/opParsers/parseELU.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseELU.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseEltwise.cpp b/parsers/caffe/caffeParser/opParsers/parseEltwise.cpp index 82601ffc..5f572075 100644 --- a/parsers/caffe/caffeParser/opParsers/parseEltwise.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseEltwise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseInnerProduct.cpp b/parsers/caffe/caffeParser/opParsers/parseInnerProduct.cpp index 4819c8a2..a13c0d6a 100644 --- a/parsers/caffe/caffeParser/opParsers/parseInnerProduct.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseInnerProduct.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseLRN.cpp b/parsers/caffe/caffeParser/opParsers/parseLRN.cpp index 2b7ff20b..b9587afe 100644 --- a/parsers/caffe/caffeParser/opParsers/parseLRN.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseLRN.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parsePReLU.cpp b/parsers/caffe/caffeParser/opParsers/parsePReLU.cpp index d2805988..f31e204a 100644 --- a/parsers/caffe/caffeParser/opParsers/parsePReLU.cpp +++ b/parsers/caffe/caffeParser/opParsers/parsePReLU.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parsePermute.cpp b/parsers/caffe/caffeParser/opParsers/parsePermute.cpp index 33163011..8803b4b0 100644 --- a/parsers/caffe/caffeParser/opParsers/parsePermute.cpp +++ b/parsers/caffe/caffeParser/opParsers/parsePermute.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parsePooling.cpp b/parsers/caffe/caffeParser/opParsers/parsePooling.cpp index fb0cac87..5e69e419 100644 --- a/parsers/caffe/caffeParser/opParsers/parsePooling.cpp +++ b/parsers/caffe/caffeParser/opParsers/parsePooling.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parsePower.cpp b/parsers/caffe/caffeParser/opParsers/parsePower.cpp index 0e77baeb..492c36d8 100644 --- a/parsers/caffe/caffeParser/opParsers/parsePower.cpp +++ b/parsers/caffe/caffeParser/opParsers/parsePower.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseReLU.cpp b/parsers/caffe/caffeParser/opParsers/parseReLU.cpp index 756e9b4c..d37e5fbe 100644 --- a/parsers/caffe/caffeParser/opParsers/parseReLU.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseReLU.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseReduction.cpp b/parsers/caffe/caffeParser/opParsers/parseReduction.cpp index ce6b8c0c..c3bf0742 100644 --- a/parsers/caffe/caffeParser/opParsers/parseReduction.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseReduction.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseReshape.cpp b/parsers/caffe/caffeParser/opParsers/parseReshape.cpp index cfe233d3..a31698a2 100644 --- a/parsers/caffe/caffeParser/opParsers/parseReshape.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseReshape.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseScale.cpp b/parsers/caffe/caffeParser/opParsers/parseScale.cpp index 1cdb8e0c..bd1efa94 100644 --- a/parsers/caffe/caffeParser/opParsers/parseScale.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseScale.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseSigmoid.cpp b/parsers/caffe/caffeParser/opParsers/parseSigmoid.cpp index a94a4181..46d4b9d2 100644 --- a/parsers/caffe/caffeParser/opParsers/parseSigmoid.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseSigmoid.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseSoftMax.cpp b/parsers/caffe/caffeParser/opParsers/parseSoftMax.cpp index ac1407e0..0d88de7f 100644 --- a/parsers/caffe/caffeParser/opParsers/parseSoftMax.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseSoftMax.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/opParsers/parseTanH.cpp b/parsers/caffe/caffeParser/opParsers/parseTanH.cpp index f7bb501e..e3c6a3dd 100644 --- a/parsers/caffe/caffeParser/opParsers/parseTanH.cpp +++ b/parsers/caffe/caffeParser/opParsers/parseTanH.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeParser/readProto.h b/parsers/caffe/caffeParser/readProto.h index 09bae595..b9276819 100644 --- a/parsers/caffe/caffeParser/readProto.h +++ b/parsers/caffe/caffeParser/readProto.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp b/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp index bff42a35..5d4fe134 100644 --- a/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp +++ b/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h b/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h index 4644f80d..e32e979d 100644 --- a/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h +++ b/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/caffe/caffeWeightFactory/weightType.h b/parsers/caffe/caffeWeightFactory/weightType.h index 71f300cc..6377d592 100644 --- a/parsers/caffe/caffeWeightFactory/weightType.h +++ b/parsers/caffe/caffeWeightFactory/weightType.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/common/half.h b/parsers/common/half.h index 8ca4891d..7497459a 100644 --- a/parsers/common/half.h +++ b/parsers/common/half.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/common/ieee_half.h b/parsers/common/ieee_half.h index dd4963c1..071aee09 100644 --- a/parsers/common/ieee_half.h +++ b/parsers/common/ieee_half.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/parsers/common/parserUtils.h b/parsers/common/parserUtils.h index 25b54da4..115a2efa 100644 --- a/parsers/common/parserUtils.h +++ b/parsers/common/parserUtils.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/CMakeLists.txt b/plugin/CMakeLists.txt index 02e055d5..393d4891 100644 --- a/plugin/CMakeLists.txt +++ b/plugin/CMakeLists.txt @@ -135,7 +135,7 @@ set_target_properties(${SHARED_TARGET} PROPERTIES if (MSVC) set_target_properties(${SHARED_TARGET} PROPERTIES LINK_FLAGS "/DEF:${PLUGIN_EXPORT_DEF}") else() - set_target_properties(${SHARED_TARGET} PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL -Wl,--version-script=${PLUGIN_EXPORT_MAP} -Wl,--no-undefined") + set_target_properties(${SHARED_TARGET} PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL -Wl,-Bsymbolic -Wl,--version-script=${PLUGIN_EXPORT_MAP} -Wl,--no-undefined") endif() set_target_properties(${SHARED_TARGET} PROPERTIES DEBUG_POSTFIX ${TRT_DEBUG_POSTFIX}) diff --git a/plugin/api/inferPlugin.cpp b/plugin/api/inferPlugin.cpp index 8e16dfe3..b55f9388 100644 --- a/plugin/api/inferPlugin.cpp +++ b/plugin/api/inferPlugin.cpp @@ -176,43 +176,44 @@ extern "C" { bool initLibNvInferPlugins(void* logger, const char* libNamespace) { - initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); 
initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); - initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); + initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); initializePlugin(logger, libNamespace); diff --git a/plugin/batchTilePlugin/batchTilePlugin.h b/plugin/batchTilePlugin/batchTilePlugin.h index 1f0bc199..4f2bf37a 100644 --- a/plugin/batchTilePlugin/batchTilePlugin.h +++ b/plugin/batchTilePlugin/batchTilePlugin.h @@ -42,7 +42,7 @@ class BatchTilePlugin : public IPluginV2Ext int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int) const noexcept override; + size_t getWorkspaceSize(int32_t) const noexcept override; int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; diff --git a/plugin/batchedNMSPlugin/gatherNMSOutputs.h b/plugin/batchedNMSPlugin/gatherNMSOutputs.h index e3393a08..f245eb93 100644 --- a/plugin/batchedNMSPlugin/gatherNMSOutputs.h +++ b/plugin/batchedNMSPlugin/gatherNMSOutputs.h @@ -18,8 +18,8 @@ #define TRT_BATCHED_NMS_HELPER_H #include "common/plugin.h" -pluginStatus_t gatherNMSOutputs(cudaStream_t stream, bool shareLocation, int numImages, int numPredsPerClass, - int numClasses, int topK, int keepTopK, nvinfer1::DataType DT_BBOX, nvinfer1::DataType DT_SCORE, +pluginStatus_t gatherNMSOutputs(cudaStream_t stream, bool shareLocation, int32_t numImages, int32_t numPredsPerClass, + int32_t numClasses, int32_t topK, int32_t keepTopK, nvinfer1::DataType DT_BBOX, nvinfer1::DataType DT_SCORE, void const* indices, void const* scores, void const* bboxData, void* keepCount, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, bool clipBoxes, float const scoreShift); diff --git a/plugin/bertQKVToContextPlugin/CustomQKVToContextPluginDynamic_PluginConfig.yaml b/plugin/bertQKVToContextPlugin/CustomQKVToContextPluginDynamic_PluginConfig.yaml index 0931c67b..6b56ebea 100644 --- a/plugin/bertQKVToContextPlugin/CustomQKVToContextPluginDynamic_PluginConfig.yaml +++ b/plugin/bertQKVToContextPlugin/CustomQKVToContextPluginDynamic_PluginConfig.yaml @@ -5,14 +5,12 @@ versions: "1": inputs: - input - - input_mask outputs: - output input_dims: input: 5 - input_mask: 1 output_dims: - output: "input_0, input_mask_0, hidden_size_0, 1, 1" + output: "input_0, input_1, hidden_size_0, 1, 1" attributes: - type_id - hidden_size @@ -55,6 +53,85 @@ versions: - hidden_size - num_heads - has_mask + golden_io_path: "plugin/bertQKVToContextPlugin/CustomQKVToContextPluginDynamic_PluginGoldenIO.json" + abs_tol: 1e-5 + rel_tol: 1e-5 + fp16_atol: 1e-2 + fp16_rtol: 1e-2 + configs: + config1: + 
input_types: + input: float16 + attribute_options: + type_id: + value: 1 + shape: "1" + hidden_size: + value: 768 + shape: "1" + num_heads: + value: 12 + shape: "1" + has_mask: + value: 0 + shape: "1" + output_types: + output: float16 + config2: + input_types: + input: float16 + attribute_options: + type_id: + value: 1 + shape: "1" + hidden_size: + value: 1024 + shape: "1" + num_heads: + value: 16 + shape: "1" + has_mask: + value: 0 + shape: "1" + output_types: + output: float16 + config5: + input_types: + input: float16 + attribute_options: + type_id: + value: 1 + shape: "1" + hidden_size: + value: 384 + shape: "1" + num_heads: + value: 12 + shape: "1" + has_mask: + value: 0 + shape: "1" + output_types: + output: float16 + has_mask: + input_types: + input: float32 + input_mask: int32 + attribute_options: + type_id: + value: 0 + shape: "1" + hidden_size: + value: 3 + shape: "1" + num_heads: + value: 3 + shape: "1" + has_mask: + value: 1 + shape: "1" + output_types: + output: float32 "2": inputs: - input diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/include/fused_multihead_attention.h b/plugin/bertQKVToContextPlugin/fused_multihead_attention/include/fused_multihead_attention.h index 0eede2aa..9f541aaf 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/include/fused_multihead_attention.h +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/include/fused_multihead_attention.h @@ -108,7 +108,7 @@ struct Fused_multihead_attention_params float* max_scratch_ptr{}; float* sum_scratch_ptr{}; // Scratch buffer to finalize the output (not needed for FP16). - int* o_scratch_ptr{}; + int32_t* o_scratch_ptr{}; }; //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -398,7 +398,7 @@ class TFusedMultiHeadAttentionXMMAKernel } } mFunctions.insert({kernelKey, funcInfo}); - const int s = static_cast<int>(kernelMeta.mS); + const int32_t s = static_cast<int32_t>(kernelMeta.mS); if (mValidSequences.find(s) == mValidSequences.end()) { mValidSequences.insert(s); @@ -425,7 +425,7 @@ class TFusedMultiHeadAttentionXMMAKernel } } - bool isValid(int s) const + bool isValid(int32_t s) const { return (mValidSequences.find(s) != mValidSequences.end()); } @@ -487,7 +487,7 @@ class TFusedMultiHeadAttentionXMMAKernel CUfunction mDeviceFunction; }; std::unordered_map mFunctions; - std::set<int> mValidSequences; + std::set<int32_t> mValidSequences; }; template diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm75.cpp index e3e3e986..af45426d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm80.cpp index 111eed80..3e5031b1 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm87.cpp index f76a4b0c..0d0a6ed7 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm90.cpp index 4cd98ea5..a5134aaf 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_128_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm75.cpp index ea2872a4..e2604633 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm80.cpp index 353df03d..035270eb 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm86.cpp index ac48fd26..81f7a887 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm87.cpp index e9b441fd..929c0a4b 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm90.cpp index 4bbf6da7..a9592f3f 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_384_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_512_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_512_64_kernel.sm90.cpp index 61b71d35..a5a19772 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_512_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_512_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm75.cpp index a59a3033..9dc6ffa6 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm80.cpp index 3dcb178e..588d5dc8 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm87.cpp index dc477b4c..4d6308d3 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm90.cpp index 0c40a36a..fd292683 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_64_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm75.cpp index 7a097f31..238e9fbd 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm80.cpp index afd69591..a2eb24f7 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm87.cpp index 0908bb48..5b39da95 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm90.cpp index 82cb6ce4..1af3e96a 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_fp16_96_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm75.cpp index 79c0c662..a18e4874 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm80.cpp index 23045fa7..0c079b17 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm87.cpp index 819677d4..b88a696d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm90.cpp index 92767aa1..457af2b6 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_128_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm75.cpp index 7e077a94..22611907 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm80.cpp index f8422eb6..bf716793 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm87.cpp index d2634d20..c4376f86 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm90.cpp index aa56892d..44f159a7 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_384_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_512_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_512_64_kernel.sm90.cpp index 49f55601..fd51119e 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_512_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_512_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_64_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_64_64_kernel.sm80.cpp index 935b3ba2..062ce999 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_64_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_64_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_96_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_96_64_kernel.sm80.cpp index f81cd7fa..017f6862 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_96_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention/src/fused_multihead_attention_int8_96_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm75.cpp index aaf7f56f..373f496a 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm80.cpp index 7e5cb15a..1e3ff7c6 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm75.cpp index 8946f2c0..ece2d0eb 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm80.cpp index be4daab0..dbc34090 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm86.cpp index 810d1624..ff794f09 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm87.cpp index 0cf510c9..d957a175 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm90.cpp index 23d90611..910c2772 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_128_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm75.cpp index 3c13456b..f466437c 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm80.cpp index 18ac26ac..643f3abe 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm75.cpp index cbaadce9..b193aac5 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm80.cpp index a83fad85..eedf762f 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm86.cpp index 4ed00888..17cdf962 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm87.cpp index 1133758c..3943f07e 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm90.cpp index 18d4dbd6..8aebf6e4 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_256_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm75.cpp index cc7fd264..47d6f8b4 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm80.cpp index 46f4e7fa..2c0141c6 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm86.cpp index 3580a4cd..007b0ca5 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm87.cpp index 5d5bd4b5..e47a0eb5 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm90.cpp index 56e80a0f..71047e0d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_384_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm75.cpp index 74157890..e424fd93 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm80.cpp index e51279fc..f3b2aec9 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm75.cpp index 23c1d966..6706f1e1 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm80.cpp index c4471a00..57d31338 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm90.cpp index 510227d9..d9bbd955 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_512_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm75.cpp index 6eae08a1..a93f1f80 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm80.cpp index 41eb73f3..fc6e825e 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm86.cpp index 2b194a06..dc64aaf1 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm87.cpp index ee6fb986..17394f7b 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm90.cpp index 0786bb1a..30a6a139 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_64_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm75.cpp index f74ef32c..75826861 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm80.cpp index 4998d6a8..a5a9db91 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm86.cpp index 95ef8d73..5c0e4792 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm87.cpp index 46d56b52..75cca5b0 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm90.cpp index 437a2849..05ed3a7d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_fp16_96_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_32_kernel.sm80.cpp index 5d2091fc..7377bb87 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm87.cpp index aa3364cd..c486ba74 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm90.cpp index 93211a19..ff8b71b7 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_128_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm87.cpp index 1857e517..b55a9b29 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm90.cpp index b9ab1cbd..a486db0f 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_192_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm87.cpp index 6a8cffaa..dcac39f3 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm90.cpp index d1477730..9826a2c2 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_256_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm87.cpp index cc930e3c..b6659f16 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm90.cpp index 8228fbe0..bbb5eeeb 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_384_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm80.cpp index 5fc60b51..f9fd6183 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm87.cpp index 09603ff8..6441c74a 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm90.cpp index 3668a80d..df8cda25 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_64_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm80.cpp index 66963771..e62d93aa 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm87.cpp index 91ad85db..590c0df4 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm90.cpp index 05a7d0fe..be698b64 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_il_int8_96_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm75.cpp index 95f20196..ce3baa27 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm80.cpp index b545c838..0abcf4e3 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm72.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm72.cpp index 9852a3e0..fbf16481 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm72.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm72.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm75.cpp index b26d41ba..56cb1930 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm80.cpp index b1612ed2..f7b86091 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm86.cpp index 6a863153..fe49aaa3 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm87.cpp index 88108faa..b84b0dc8 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm90.cpp index b4535fd8..6f889451 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_128_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm72.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm72.cpp index 8d1795b6..3c3735d1 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm72.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm72.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm75.cpp index 52997298..dfe6d8ce 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm80.cpp index 0818097f..8a1d2d2c 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm86.cpp index d86ceeb4..31dd3150 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm87.cpp index 3571ce08..aa2a81c9 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm90.cpp index 54caea8a..a5e4c65e 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_192_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm75.cpp index c74af8c6..2a729502 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm80.cpp index 922856c5..aeac0ebd 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm72.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm72.cpp index 6aed0d37..a62c2cf9 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm72.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm72.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm75.cpp index 647339c7..3fa33ae5 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm80.cpp index f99cbf52..f597a37e 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm86.cpp index 59950b12..24d31716 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm87.cpp index d17b2eaf..b70f696d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm90.cpp index 7efa519a..07f7b870 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_256_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm72.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm72.cpp index d33402a0..2d62254b 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm72.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm72.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm75.cpp index 3f29bde8..b373a064 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm80.cpp index 6e6e2404..86517581 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm86.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm86.cpp index a5b12391..c9196880 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm86.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm86.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm87.cpp index e1102282..70e699f8 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm90.cpp index b8d41328..848c68be 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_384_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm75.cpp index d6d21949..baaf7441 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm80.cpp index 94fc8adf..68204bf6 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_32_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm75.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm75.cpp index 7fa46bb1..8ee4ced0 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm75.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm75.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm80.cpp index 838e3bf9..e9bd8613 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm90.cpp index 2b08ac33..48644b36 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_512_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm80.cpp index 3c34dc60..77ccb240 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm87.cpp index 55da2568..2eb5c132 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm90.cpp index 38442aab..2280de3b 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_64_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm80.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm80.cpp index 5fac09b8..b7a7f1db 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm80.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm80.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm87.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm87.cpp index 5c7f08c8..c2e6aca4 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm87.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm87.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm90.cpp b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm90.cpp index 98eecaae..a4516a2d 100644 --- a/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm90.cpp +++ b/plugin/bertQKVToContextPlugin/fused_multihead_attention_v2/src/fused_multihead_attention_v2_int8_96_64_kernel.sm90.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/bertQKVToContextPlugin/qkvToContext.cu b/plugin/bertQKVToContextPlugin/qkvToContext.cu index 6281fb36..6ebb98ea 100644 --- a/plugin/bertQKVToContextPlugin/qkvToContext.cu +++ b/plugin/bertQKVToContextPlugin/qkvToContext.cu @@ -109,7 +109,7 @@ __global__ void maskedSoftmax(const float rsqrtHeadSize, const T* input, T* outp rZ = (1.f) / Z; } __syncthreads(); - local[it] *= rZ; + local[it] = (threadIdx.x < lastValid) ? local[it] * rZ : 0.F; } #pragma unroll @@ -1264,3 +1264,4 @@ bool FusedMHARunnerInt8v2::isValid(int s) const } // namespace bert } // namespace plugin } // namespace nvinfer1 + diff --git a/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.cpp b/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.cpp index d23a1261..61ef88f7 100644 --- a/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.cpp +++ b/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.cpp @@ -80,9 +80,9 @@ QKVToContextInterleavedPlugin::QKVToContextInterleavedPlugin(std::string const& deserialize_value(&data, &length, &mUseInt8ScaleMax); } -int QKVToContextInterleavedPlugin::getSMVersion() const noexcept +int32_t QKVToContextInterleavedPlugin::getSMVersion() const noexcept { - int device{-1}; + int32_t device{-1}; PLUGIN_CHECK(cudaGetDevice(&device)); cudaDeviceProp props; PLUGIN_CHECK(cudaGetDeviceProperties(&props, device)); @@ -108,7 +108,7 @@ nvinfer1::IPluginV2DynamicExt* QKVToContextInterleavedPlugin::clone() const noex } DimsExprs QKVToContextInterleavedPlugin::getOutputDimensions( - int outputIndex, DimsExprs const* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept + int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) noexcept { // Input SHAPE is 1x(3*N*H)xTotalx1 (NCHW) // Output SHAPE is 1x(N*H)xTotalx1 @@ -124,7 +124,7 @@ DimsExprs QKVToContextInterleavedPlugin::getOutputDimensions( return output; } bool QKVToContextInterleavedPlugin::supportsFormatCombination( - int pos, PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { PLUGIN_ASSERT(nbInputs == 3); PLUGIN_ASSERT(nbOutputs == 1); @@ -140,7 +140,7 @@ bool QKVToContextInterleavedPlugin::supportsFormatCombination( if (pos == 1) { - // cuSeqlens is a int array of size B+1 + // cuSeqlens is a int32_t array of size B+1 auto const* seqlens = &inOut[pos]; return (seqlens->type == DataType::kINT32) && (seqlens->format == TensorFormat::kLINEAR); } @@ -153,19 +153,19 @@ bool QKVToContextInterleavedPlugin::supportsFormatCombination( } void QKVToContextInterleavedPlugin::configurePlugin( - DynamicPluginTensorDesc const* in, int nbInputs, DynamicPluginTensorDesc const* out, int nbOutputs) noexcept + DynamicPluginTensorDesc const* in, int32_t nbInputs, DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { } size_t QKVToContextInterleavedPlugin::getWorkspaceSize( - PluginTensorDesc const* inputs, int nbInputs, PluginTensorDesc const* outputs, int nbOutputs) const noexcept + PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } // IPluginV2Ext Methods DataType QKVToContextInterleavedPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t 
nbInputs) const noexcept { PLUGIN_ASSERT(index == 0); return DataType::kINT8; @@ -182,12 +182,12 @@ char const* QKVToContextInterleavedPlugin::getPluginVersion() const noexcept return kQKV_TO_CONTEXT_INTERLEAVED_PLUGIN_VERSION; } -int QKVToContextInterleavedPlugin::getNbOutputs() const noexcept +int32_t QKVToContextInterleavedPlugin::getNbOutputs() const noexcept { return 1; } -int QKVToContextInterleavedPlugin::initialize() noexcept +int32_t QKVToContextInterleavedPlugin::initialize() noexcept { return 0; } @@ -227,14 +227,14 @@ char const* QKVToContextInterleavedPlugin::getPluginNamespace() const noexcept return mNamespace.c_str(); } -int QKVToContextInterleavedPlugin::enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, +int32_t QKVToContextInterleavedPlugin::enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { - int const total = inputDesc[0].dims.d[2]; - int const B = inputDesc[1].dims.d[0] - 1; - int const maxS = inputDesc[2].dims.d[0]; - int S = 384; + int32_t const total = inputDesc[0].dims.d[2]; + int32_t const B = inputDesc[1].dims.d[0] - 1; + int32_t const maxS = inputDesc[2].dims.d[0]; + int32_t S = 384; if (maxS <= 128) { S = 128; @@ -257,7 +257,7 @@ int QKVToContextInterleavedPlugin::enqueue(PluginTensorDesc const* inputDesc, Pl params.o_ptr = outputs[0]; params.qkv_ptr = const_cast(inputs[0]); - params.cu_seqlens = static_cast(const_cast(inputs[1])); + params.cu_seqlens = static_cast(const_cast(inputs[1])); float scaleQkv = inputDesc[0].scale; float scaleCtx = outputDesc[0].scale; diff --git a/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.h b/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.h index 53800df6..98985646 100644 --- a/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.h +++ b/plugin/bertQKVToContextPlugin/qkvToContextInt8InterleavedPlugin.h @@ -53,26 +53,26 @@ class QKVToContextInterleavedPlugin : public nvinfer1::IPluginV2DynamicExt // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* clone() const noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, + nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept override; - int enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; + int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, 
nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; // IPluginV2Ext Methods nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; // IPluginV2 Methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -82,18 +82,18 @@ class QKVToContextInterleavedPlugin : public nvinfer1::IPluginV2DynamicExt protected: void createMHARunner() noexcept; - int getSMVersion() const noexcept; + int32_t getSMVersion() const noexcept; private: std::string const& mLayerName; std::string mNamespace; - int mS; - int mB; - int mSM; - int mHeadSize; - int mHiddenSize; - int mNumHeads; + int32_t mS; + int32_t mB; + int32_t mSM; + int32_t mHeadSize; + int32_t mHiddenSize; + int32_t mNumHeads; FusedMultiHeadAttentionXMMAKernelV2 const* mXmmaKernel; diff --git a/plugin/bertQKVToContextPlugin/qkvToContextPlugin.cpp b/plugin/bertQKVToContextPlugin/qkvToContextPlugin.cpp index 2e9861a8..ed67ef64 100644 --- a/plugin/bertQKVToContextPlugin/qkvToContextPlugin.cpp +++ b/plugin/bertQKVToContextPlugin/qkvToContextPlugin.cpp @@ -388,7 +388,7 @@ size_t QKVToContextPluginDynamic::getSerializationSize() const noexcept { PLUGIN_ASSERT(unfusedDispatcher.get()); return sizeof(mNumHeads) + sizeof(mHeadSize) + sizeof(DataType) + sizeof(mHasImask) + sizeof(mHiddenSize) - + sizeof(mSM) + sizeof(mS) + sizeof(mB) + sizeof(mDqProbs) + sizeof(int) + + sizeof(mSM) + sizeof(mS) + sizeof(mB) + sizeof(mDqProbs) + sizeof(int32_t) + unfusedDispatcher->getSerializationSize(); } @@ -785,7 +785,7 @@ bool QKVToContextVarSeqlenPlugin::supportsFormatCombination( PLUGIN_ASSERT(mUseVarSeqlen); if (pos == 2) { // must be cuSeqlens - // cuSeqlens is a int array of size B+1 + // cuSeqlens is a int32_t array of size B+1 auto const* seqlens = &inOut[pos]; return (seqlens->type == DataType::kINT32) && (seqlens->format == TensorFormat::kLINEAR); } diff --git a/plugin/bertQKVToContextPlugin/qkvToContextPlugin.h b/plugin/bertQKVToContextPlugin/qkvToContextPlugin.h index 74b8d02c..7af05d87 100644 --- a/plugin/bertQKVToContextPlugin/qkvToContextPlugin.h +++ b/plugin/bertQKVToContextPlugin/qkvToContextPlugin.h @@ -108,7 +108,8 @@ class MHARunner float mRsqrtHeadSize; }; -std::pair tuneBatchedGemm(const int32_t B, const int32_t S, const int32_t numHeads, const int32_t headSize); +std::pair tuneBatchedGemm( + const int32_t B, const int32_t S, const int32_t numHeads, const int32_t headSize); template int32_t computeScaledSoftmax(cudaStream_t stream, const int32_t ld, const int32_t B, const int32_t N, @@ -116,7 +117,7 @@ int32_t computeScaledSoftmax(cudaStream_t stream, const int32_t ld, const int32_ template int32_t computeMaskedScaledSoftmax(cudaStream_t stream, const int32_t ld, const int32_t B, const int32_t N, - float const rsqrtHeadSize, int const* maskIdx, T const* input, T* output); + float const rsqrtHeadSize, int32_t const* maskIdx, T const* input, T* output); // One of the preferred ways of making TensorRT to be able to 
see // our custom layer requires extending IPluginV2 and IPluginCreator classes. diff --git a/plugin/clipPlugin/clip.h b/plugin/clipPlugin/clip.h index b69b3983..70a53143 100644 --- a/plugin/clipPlugin/clip.h +++ b/plugin/clipPlugin/clip.h @@ -18,7 +18,7 @@ #define TRT_CLIP_H #include "NvInfer.h" -int clipInference( - cudaStream_t stream, int n, float clipMin, float clipMax, void const* input, void* output, nvinfer1::DataType type); +int32_t clipInference(cudaStream_t stream, int32_t n, float clipMin, float clipMax, void const* input, void* output, + nvinfer1::DataType type); #endif // TRT_CLIP_H diff --git a/plugin/clipPlugin/clipPlugin.cpp b/plugin/clipPlugin/clipPlugin.cpp index 3c1f28d1..8b7d22b5 100644 --- a/plugin/clipPlugin/clipPlugin.cpp +++ b/plugin/clipPlugin/clipPlugin.cpp @@ -67,30 +67,30 @@ char const* ClipPlugin::getPluginVersion() const noexcept return kCLIP_PLUGIN_VERSION; } -int ClipPlugin::getNbOutputs() const noexcept +int32_t ClipPlugin::getNbOutputs() const noexcept { return 1; } -Dims ClipPlugin::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims ClipPlugin::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(nbInputDims == 1); PLUGIN_ASSERT(index == 0); return *inputs; } -int ClipPlugin::initialize() noexcept +int32_t ClipPlugin::initialize() noexcept { return 0; } -int ClipPlugin::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept +int32_t ClipPlugin::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept { try { void* output = outputs[0]; - int status = pluginStatus_t::STATUS_FAILURE; + int32_t status = pluginStatus_t::STATUS_FAILURE; status = clipInference(stream, mInputVolume * batchSize, mClipMin, mClipMax, inputs[0], output, mDataType); if (status != pluginStatus_t::STATUS_SUCCESS) @@ -130,8 +130,8 @@ void ClipPlugin::serialize(void* buffer) const noexcept PLUGIN_ASSERT(d == a + getSerializationSize()); } -void ClipPlugin::configureWithFormat(Dims const* inputs, int nbInputs, Dims const* outputs, int nbOutputs, - DataType type, PluginFormat format, int) noexcept +void ClipPlugin::configureWithFormat(Dims const* inputs, int32_t nbInputs, Dims const* outputs, int32_t nbOutputs, + DataType type, PluginFormat format, int32_t) noexcept { PLUGIN_ASSERT(nbOutputs == 1); PLUGIN_API_CHECK_ENUM_RANGE(DataType, type); @@ -139,7 +139,7 @@ void ClipPlugin::configureWithFormat(Dims const* inputs, int nbInputs, Dims cons mDataType = type; size_t volume = 1; - for (int i = 0; i < inputs->nbDims; i++) + for (int32_t i = 0; i < inputs->nbDims; i++) { volume *= inputs->d[i]; } diff --git a/plugin/clipPlugin/clipPlugin.h b/plugin/clipPlugin/clipPlugin.h index 8ffbfad2..f7e218ce 100644 --- a/plugin/clipPlugin/clipPlugin.h +++ b/plugin/clipPlugin/clipPlugin.h @@ -39,28 +39,28 @@ class ClipPlugin : public nvinfer1::pluginInternal::BasePlugin ClipPlugin() = delete; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int) const noexcept override + size_t getWorkspaceSize(int32_t) const noexcept override { return 
0; }; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; - void configureWithFormat(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, DataType type, - PluginFormat format, int maxBatchSize) noexcept override; + void configureWithFormat(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, + DataType type, PluginFormat format, int32_t maxBatchSize) noexcept override; bool supportsFormat(DataType type, PluginFormat format) const noexcept override; diff --git a/plugin/common/bboxUtils.h b/plugin/common/bboxUtils.h index a49602b4..028eeb81 100644 --- a/plugin/common/bboxUtils.h +++ b/plugin/common/bboxUtils.h @@ -40,10 +40,10 @@ template struct BboxInfo { T conf_score; - int label; - int bbox_idx; + int32_t label; + int32_t bbox_idx; bool kept; - BboxInfo(T conf_score, int label, int bbox_idx, bool kept) + BboxInfo(T conf_score, int32_t label, int32_t bbox_idx, bool kept) : conf_score(conf_score) , label(label) , bbox_idx(bbox_idx) @@ -72,7 +72,7 @@ int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize); size_t dataTypeSize(nvinfer1::DataType dtype); -void setUniformOffsets(cudaStream_t stream, int num_segments, int offset, int* d_offsets); +void setUniformOffsets(cudaStream_t stream, int32_t num_segments, int32_t offset, int32_t* d_offsets); } // namespace plugin } // namespace nvinfer1 #endif diff --git a/plugin/common/bertCommon.h b/plugin/common/bertCommon.h index a72fb65d..68de8c07 100644 --- a/plugin/common/bertCommon.h +++ b/plugin/common/bertCommon.h @@ -91,19 +91,19 @@ namespace plugin namespace bert { -inline int getSMVersion() +inline int32_t getSMVersion() { - int device{-1}; + int32_t device{-1}; PLUGIN_CHECK(cudaGetDevice(&device)); cudaDeviceProp props; PLUGIN_CHECK(cudaGetDeviceProperties(&props, device)); return nvinfer1::plugin::getTrtSMVersionDec(props.major, props.minor); } -inline int getMHAMaskPackedSize(int smVersion, nvinfer1::DataType dataType, int sequenceLength) +inline int32_t getMHAMaskPackedSize(int32_t smVersion, nvinfer1::DataType dataType, int32_t sequenceLength) { // this code must match EmbLayerNormPluginDynamic::getOutputDimensions in embLayerNormPlugin.cpp - int packedSize = unfusedMaskSize; + int32_t packedSize = unfusedMaskSize; bool isSmOK = (smVersion == kSM_75 || smVersion == kSM_80 || smVersion == kSM_86 || smVersion == kSM_87 || smVersion == kSM_90); bool isPrecisionOK = (dataType == nvinfer1::DataType::kINT8 || dataType == nvinfer1::DataType::kHALF); @@ -196,36 +196,38 @@ inline T* devToDev(T const* data, size_t nbElem) } template -cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, - int n, int k, const T alpha, T const* A, int lda, T const* B, int ldb, const T beta, T* C, int ldc); +cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int32_t m, + int32_t n, int32_t k, const T alpha, T const* A, int32_t lda, T const* B, int32_t ldb, const T beta, T* C, + int32_t ldc); template <> -cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, - int n, int k, float const alpha, float const* A, int lda, float const* B, 
int ldb, float const beta, float* C, - int ldc) +cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int32_t m, + int32_t n, int32_t k, float const alpha, float const* A, int32_t lda, float const* B, int32_t ldb, float const beta, + float* C, int32_t ldc) { return cublasSgemm(handle, transa, transb, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc); } template <> -cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, - int n, int k, const half alpha, half const* A, int lda, half const* B, int ldb, const half beta, half* C, int ldc) +cublasStatus_t inline cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int32_t m, + int32_t n, int32_t k, const half alpha, half const* A, int32_t lda, half const* B, int32_t ldb, const half beta, + half* C, int32_t ldc) { return cublasHgemm(handle, transa, transb, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc); } template cublasStatus_t inline cublasGemmStridedBatchedEx(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int n, int k, const T alpha, T const* A, int lda, long long int strideA, - T const* B, int ldb, long long int strideB, const T beta, T* C, int ldc, long long int strideC, int batchCount, + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, const T alpha, T const* A, int32_t lda, int64_t strideA, + T const* B, int32_t ldb, int64_t strideB, const T beta, T* C, int32_t ldc, int64_t strideC, int32_t batchCount, cublasGemmAlgo_t algo); template <> cublasStatus_t inline cublasGemmStridedBatchedEx(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int n, int k, float const alpha, float const* A, int lda, long long int strideA, - float const* B, int ldb, long long int strideB, float const beta, float* C, int ldc, long long int strideC, - int batchCount, cublasGemmAlgo_t algo) + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, float const alpha, float const* A, int32_t lda, + int64_t strideA, float const* B, int32_t ldb, int64_t strideB, float const beta, float* C, int32_t ldc, + int64_t strideC, int32_t batchCount, cublasGemmAlgo_t algo) { return ::cublasGemmStridedBatchedEx(handle, transa, transb, m, n, k, &alpha, A, CUDA_R_32F, lda, strideA, B, @@ -234,9 +236,9 @@ cublasStatus_t inline cublasGemmStridedBatchedEx(cublasHandle_t handle, cublasOp template <> cublasStatus_t inline cublasGemmStridedBatchedEx(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int n, int k, const half alpha, half const* A, int lda, long long int strideA, - half const* B, int ldb, long long int strideB, const half beta, half* C, int ldc, long long int strideC, - int batchCount, cublasGemmAlgo_t algo) + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, const half alpha, half const* A, int32_t lda, + int64_t strideA, half const* B, int32_t ldb, int64_t strideB, const half beta, half* C, int32_t ldc, + int64_t strideC, int32_t batchCount, cublasGemmAlgo_t algo) { return ::cublasGemmStridedBatchedEx(handle, transa, transb, m, n, k, &alpha, A, CUDA_R_16F, lda, strideA, B, CUDA_R_16F, ldb, strideB, &beta, C, CUDA_R_16F, ldc, strideC, batchCount, CUDA_R_16F, algo); @@ -244,14 +246,14 @@ cublasStatus_t inline cublasGemmStridedBatchedEx(cublasHandle_t handle, cublasOp template cublasStatus_t inline cublasGemmStridedBatched(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int 
n, int k, const T alpha, T const* A, int lda, long long int strideA, - T const* B, int ldb, long long int strideB, const T beta, T* C, int ldc, long long int strideC, int batchCount); + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, const T alpha, T const* A, int32_t lda, int64_t strideA, + T const* B, int32_t ldb, int64_t strideB, const T beta, T* C, int32_t ldc, int64_t strideC, int32_t batchCount); template <> cublasStatus_t inline cublasGemmStridedBatched(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int n, int k, float const alpha, float const* A, int lda, long long int strideA, - float const* B, int ldb, long long int strideB, float const beta, float* C, int ldc, long long int strideC, - int batchCount) + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, float const alpha, float const* A, int32_t lda, + int64_t strideA, float const* B, int32_t ldb, int64_t strideB, float const beta, float* C, int32_t ldc, + int64_t strideC, int32_t batchCount) { return cublasSgemmStridedBatched( @@ -260,9 +262,9 @@ cublasStatus_t inline cublasGemmStridedBatched(cublasHandle_t handle, cublasOper template <> cublasStatus_t inline cublasGemmStridedBatched(cublasHandle_t handle, cublasOperation_t transa, - cublasOperation_t transb, int m, int n, int k, const half alpha, half const* A, int lda, long long int strideA, - half const* B, int ldb, long long int strideB, const half beta, half* C, int ldc, long long int strideC, - int batchCount) + cublasOperation_t transb, int32_t m, int32_t n, int32_t k, const half alpha, half const* A, int32_t lda, + int64_t strideA, half const* B, int32_t ldb, int64_t strideB, const half beta, half* C, int32_t ldc, + int64_t strideC, int32_t batchCount) { return cublasHgemmStridedBatched( handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount); diff --git a/plugin/common/checkMacrosPlugin.cpp b/plugin/common/checkMacrosPlugin.cpp index a6364396..f94d90d0 100644 --- a/plugin/common/checkMacrosPlugin.cpp +++ b/plugin/common/checkMacrosPlugin.cpp @@ -30,7 +30,7 @@ namespace plugin ILogger* gLogger{}; template -int LogStream::Buf::sync() +int32_t LogStream::Buf::sync() { std::string s = str(); while (!s.empty() && s.back() == '\n') @@ -53,7 +53,7 @@ LogStream gLogInfo; LogStream gLogVerbose; // break-pointable -void throwCudaError(char const* file, char const* function, int line, int status, char const* msg) +void throwCudaError(char const* file, char const* function, int32_t line, int32_t status, char const* msg) { CudaError error(file, function, line, status, msg); error.log(gLogError); @@ -62,7 +62,7 @@ void throwCudaError(char const* file, char const* function, int line, int status } // break-pointable -void throwCublasError(char const* file, char const* function, int line, int status, char const* msg) +void throwCublasError(char const* file, char const* function, int32_t line, int32_t status, char const* msg) { if (msg == nullptr) { @@ -88,7 +88,7 @@ void throwCublasError(char const* file, char const* function, int line, int stat } // break-pointable -void throwCudnnError(char const* file, char const* function, int line, int status, char const* msg) +void throwCudnnError(char const* file, char const* function, int32_t line, int32_t status, char const* msg) { CudnnError error(file, function, line, status, msg); error.log(gLogError); @@ -97,7 +97,7 @@ void throwCudnnError(char const* file, char const* function, int line, int statu } // break-pointable -void 
throwPluginError(char const* file, char const* function, int line, int status, char const* msg) +void throwPluginError(char const* file, char const* function, int32_t line, int32_t status, char const* msg) { PluginError error(file, function, line, status, msg); reportValidationFailure(msg, file, line); @@ -105,13 +105,13 @@ void throwPluginError(char const* file, char const* function, int line, int stat throw error; } -void logError(char const* msg, char const* file, char const* fn, int line) +void logError(char const* msg, char const* file, char const* fn, int32_t line) { gLogError << "Parameter check failed at: " << file << "::" << fn << "::" << line; gLogError << ", condition: " << msg << std::endl; } -void reportValidationFailure(char const* msg, char const* file, int line) +void reportValidationFailure(char const* msg, char const* file, int32_t line) { std::ostringstream stream; stream << "Validation failed: " << msg << "\n" << file << ':' << line << "\n"; @@ -127,7 +127,7 @@ void reportValidationFailure(char const* msg, char const* file, int line) } // break-pointable -void reportAssertion(char const* msg, char const* file, int line) +void reportAssertion(char const* msg, char const* file, int32_t line) { std::ostringstream stream; stream << "Assertion failed: " << msg << "\n" diff --git a/plugin/common/checkMacrosPlugin.h b/plugin/common/checkMacrosPlugin.h index 4629d1e1..3b28ec08 100644 --- a/plugin/common/checkMacrosPlugin.h +++ b/plugin/common/checkMacrosPlugin.h @@ -37,7 +37,7 @@ class LogStream : public std::ostream class Buf : public std::stringbuf { public: - int sync() override; + int32_t sync() override; }; Buf buffer; @@ -87,23 +87,23 @@ extern LogStream gLogWarning; extern LogStream gLogInfo; extern LogStream gLogVerbose; -void reportValidationFailure(char const* msg, char const* file, int line); -void reportAssertion(char const* msg, char const* file, int line); -void logError(char const* msg, char const* file, char const* fn, int line); +void reportValidationFailure(char const* msg, char const* file, int32_t line); +void reportAssertion(char const* msg, char const* file, int32_t line); +void logError(char const* msg, char const* file, char const* fn, int32_t line); [[noreturn]] void throwCudaError( - char const* file, char const* function, int line, int status, char const* msg = nullptr); + char const* file, char const* function, int32_t line, int32_t status, char const* msg = nullptr); [[noreturn]] void throwCudnnError( - char const* file, char const* function, int line, int status, char const* msg = nullptr); + char const* file, char const* function, int32_t line, int32_t status, char const* msg = nullptr); [[noreturn]] void throwCublasError( - char const* file, char const* function, int line, int status, char const* msg = nullptr); + char const* file, char const* function, int32_t line, int32_t status, char const* msg = nullptr); [[noreturn]] void throwPluginError( - char const* file, char const* function, int line, int status, char const* msg = nullptr); + char const* file, char const* function, int32_t line, int32_t status, char const* msg = nullptr); class TRTException : public std::exception { public: - TRTException(char const* fl, char const* fn, int ln, int st, char const* msg, char const* nm) + TRTException(char const* fl, char const* fn, int32_t ln, int32_t st, char const* msg, char const* nm) : file(fl) , function(fn) , line(ln) @@ -121,8 +121,8 @@ class TRTException : public std::exception protected: char const* file{nullptr}; char const* 
function{nullptr}; - int line{0}; - int status{0}; + int32_t line{0}; + int32_t status{0}; char const* message{nullptr}; char const* name{nullptr}; }; @@ -130,7 +130,7 @@ class TRTException : public std::exception class CudaError : public TRTException { public: - CudaError(char const* fl, char const* fn, int ln, int stat, char const* msg = nullptr) + CudaError(char const* fl, char const* fn, int32_t ln, int32_t stat, char const* msg = nullptr) : TRTException(fl, fn, ln, stat, msg, "Cuda") { } @@ -139,7 +139,7 @@ class CudaError : public TRTException class CudnnError : public TRTException { public: - CudnnError(char const* fl, char const* fn, int ln, int stat, char const* msg = nullptr) + CudnnError(char const* fl, char const* fn, int32_t ln, int32_t stat, char const* msg = nullptr) : TRTException(fl, fn, ln, stat, msg, "Cudnn") { } @@ -148,7 +148,7 @@ class CudnnError : public TRTException class CublasError : public TRTException { public: - CublasError(char const* fl, char const* fn, int ln, int stat, char const* msg = nullptr) + CublasError(char const* fl, char const* fn, int32_t ln, int32_t stat, char const* msg = nullptr) : TRTException(fl, fn, ln, stat, msg, "cuBLAS") { } @@ -157,7 +157,7 @@ class CublasError : public TRTException class PluginError : public TRTException { public: - PluginError(char const* fl, char const* fn, int ln, int stat, char const* msg = nullptr) + PluginError(char const* fl, char const* fn, int32_t ln, int32_t stat, char const* msg = nullptr) : TRTException(fl, fn, ln, stat, msg, "Plugin") { } @@ -189,9 +189,9 @@ inline void caughtError(std::exception const& e) } \ } -#define PLUGIN_API_CHECK_ENUM_RANGE(Type, val) PLUGIN_API_CHECK(int(val) >= 0 && int(val) < EnumMax()) +#define PLUGIN_API_CHECK_ENUM_RANGE(Type, val) PLUGIN_API_CHECK(int32_t(val) >= 0 && int32_t(val) < EnumMax()) #define PLUGIN_API_CHECK_ENUM_RANGE_RETVAL(Type, val, retval) \ - PLUGIN_API_CHECK_RETVAL(int(val) >= 0 && int(val) < EnumMax(), retval) + PLUGIN_API_CHECK_RETVAL(int32_t(val) >= 0 && int32_t(val) < EnumMax(), retval) #define PLUGIN_CHECK_CUDA(call) \ do \ diff --git a/plugin/common/common.cuh b/plugin/common/common.cuh index 1f20e590..5541bb8c 100644 --- a/plugin/common/common.cuh +++ b/plugin/common/common.cuh @@ -243,9 +243,9 @@ __device__ inline kv_half2 operator+(const kv_half2& a, const kv_half2& b) template using kvp = cub::KeyValuePair; -template +template __device__ inline void layerNorm( - const kvp& threadData, const int ld, const int offset, const P* beta, const P* gamma, T* output) + const kvp& threadData, const int32_t ld, const int32_t offset, const P* beta, const P* gamma, T* output) { // Assuming threadData is already divided by ld @@ -263,9 +263,9 @@ __device__ inline void layerNorm( } __syncthreads(); - for (int i = threadIdx.x; i < ld; i += TPB) + for (int32_t i = threadIdx.x; i < ld; i += TPB) { - const int idx = offset + i; + const int32_t idx = offset + i; const R val = output[idx]; const R g(gamma[i]); const R b(beta[i]); @@ -273,9 +273,9 @@ __device__ inline void layerNorm( } } -template +template __device__ inline void layerNormSmall( - const T val, const kvp& threadData, const int ld, const int idx, const P* beta, const P* gamma, T* output) + const T val, const kvp& threadData, const int32_t ld, const int32_t idx, const P* beta, const P* gamma, T* output) { // Assuming threadData is already divided by ld // Small settings: the block covers the leading dimension TPB >= ld. 
The input @@ -305,7 +305,7 @@ __device__ inline void layerNormSmall( template __device__ inline void scaledSoftmaxSmall( - const int ld, const int lastValid, const float rsqrtHeadSize, const T* input, T* output) + const int32_t ld, const int32_t lastValid, const float rsqrtHeadSize, const T* input, T* output) { using BlockReduce = cub::BlockReduce; @@ -315,13 +315,13 @@ __device__ inline void scaledSoftmaxSmall( __shared__ float rZ; __shared__ float fMax; - const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld; + const int32_t offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld; const float w(rsqrtHeadSize); cub::Sum sum; float threadData(-FLT_MAX); - const int idx = offset + threadIdx.x; + const int32_t idx = offset + threadIdx.x; if (threadIdx.x < lastValid) { threadData = input[idx]; @@ -353,14 +353,14 @@ __device__ inline void scaledSoftmaxSmall( if (threadIdx.x < ld) { - // this will be 0 for threadIdx.x >= lastValid - output[idx] = T(threadData * rZ); + float const val = (threadIdx.x < lastValid) ? threadData * rZ : 0.F; + output[idx] = static_cast(val); } } template __device__ inline void scaledSoftmax( - const int ld, const int lastValid, const float rsqrtHeadSize, const T* input, T* output) + const int32_t ld, const int32_t lastValid, const float rsqrtHeadSize, const T* input, T* output) { using BlockReduce = cub::BlockReduce; __shared__ typename BlockReduce::TempStorage tmpStorage; @@ -368,7 +368,7 @@ __device__ inline void scaledSoftmax( __shared__ float rZ; __shared__ float fMax; - const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld; + const int32_t offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld; const float w(rsqrtHeadSize); cub::Sum sum; @@ -378,9 +378,9 @@ __device__ inline void scaledSoftmax( { threadData = 0; } - for (int i = threadIdx.x; i < lastValid; i += TPB) + for (int32_t i = threadIdx.x; i < lastValid; i += TPB) { - const int idx = offset + i; + const int32_t idx = offset + i; threadData = max(static_cast(input[idx]), threadData); } @@ -393,9 +393,9 @@ __device__ inline void scaledSoftmax( threadData = 0; - for (int i = threadIdx.x; i < lastValid; i += TPB) + for (int32_t i = threadIdx.x; i < lastValid; i += TPB) { - const int idx = offset + i; + const int32_t idx = offset + i; threadData += exp((static_cast(input[idx]) - fMax) * w); } @@ -407,9 +407,9 @@ __device__ inline void scaledSoftmax( } __syncthreads(); - for (int i = threadIdx.x; i < ld; i += TPB) + for (int32_t i = threadIdx.x; i < ld; i += TPB) { - const int idx = offset + i; + const int32_t idx = offset + i; const float val = (i < lastValid) ? 
exp((static_cast(input[idx]) - fMax) * w) * rZ : 0.f; output[idx] = T(val); } @@ -426,7 +426,7 @@ constexpr HDI IntType alignTo(IntType a, IntType b) return ceildiv(a, b) * b; } -template +template struct BytesToType; template <> @@ -450,7 +450,7 @@ struct BytesToType<16> using type = float4; }; -template +template __device__ inline void copy(const void* local, void* data) { using T = typename BytesToType::type; @@ -500,7 +500,7 @@ static inline __device__ uint32_t float4_to_char4(float x, inline __device__ char quantize(const float x, const float qScale) { - int tmpq = __float2int_rn(qScale * x); // scale and round + int32_t tmpq = __float2int_rn(qScale * x); // scale and round char tmpq8 = min(127, max(-127, tmpq)); // clip and cast return tmpq8; } diff --git a/plugin/common/cub_helper.h b/plugin/common/cub_helper.h index 3ac17480..ee8402c4 100644 --- a/plugin/common/cub_helper.h +++ b/plugin/common/cub_helper.h @@ -17,13 +17,13 @@ #include "common/kernels/kernel.h" #include template -size_t cubSortPairsWorkspaceSize(int num_items, int num_segments) +size_t cubSortPairsWorkspaceSize(int32_t num_items, int32_t num_segments) { size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending((void*) NULL, temp_storage_bytes, (KeyT const*) NULL, (KeyT*) NULL, (ValueT const*) NULL, (ValueT*) NULL, num_items, // # items num_segments, // # segments - (int const*) NULL, (int const*) NULL); + (int32_t const*) NULL, (int32_t const*) NULL); return temp_storage_bytes; } diff --git a/plugin/common/cudaDriverWrapper.cpp b/plugin/common/cudaDriverWrapper.cpp index dc143567..a4ce78dd 100644 --- a/plugin/common/cudaDriverWrapper.cpp +++ b/plugin/common/cudaDriverWrapper.cpp @@ -74,7 +74,7 @@ CUresult CUDADriverWrapper::cuGetErrorName(CUresult error, char const** pStr) co return (*_cuGetErrorName)(error, pStr); } -CUresult CUDADriverWrapper::cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value) const +CUresult CUDADriverWrapper::cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int32_t value) const { return (*_cuFuncSetAttribute)(hfunc, attrib, value); } diff --git a/plugin/common/cudaDriverWrapper.h b/plugin/common/cudaDriverWrapper.h index cdde0531..f92fd99e 100644 --- a/plugin/common/cudaDriverWrapper.h +++ b/plugin/common/cudaDriverWrapper.h @@ -42,7 +42,7 @@ class CUDADriverWrapper CUresult cuGetErrorName(CUresult error, char const** pStr) const; - CUresult cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value) const; + CUresult cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int32_t value) const; CUresult cuLinkComplete(CUlinkState state, void** cubinOut, size_t* sizeOut) const; @@ -73,24 +73,24 @@ class CUDADriverWrapper private: void* handle; CUresult (*_cuGetErrorName)(CUresult, char const**); - CUresult (*_cuFuncSetAttribute)(CUfunction, CUfunction_attribute, int); + CUresult (*_cuFuncSetAttribute)(CUfunction, CUfunction_attribute, int32_t); CUresult (*_cuLinkComplete)(CUlinkState, void**, size_t*); CUresult (*_cuModuleUnload)(CUmodule); CUresult (*_cuLinkDestroy)(CUlinkState); - CUresult (*_cuLinkCreate)(unsigned int, CUjit_option*, void**, CUlinkState*); + CUresult (*_cuLinkCreate)(uint32_t, CUjit_option*, void**, CUlinkState*); CUresult (*_cuModuleLoadData)(CUmodule*, void const*); CUresult (*_cuModuleGetFunction)(CUfunction*, CUmodule, char const*); - CUresult (*_cuLinkAddFile)(CUlinkState, CUjitInputType, char const*, unsigned int, CUjit_option*, void**); + CUresult (*_cuLinkAddFile)(CUlinkState, 
CUjitInputType, char const*, uint32_t, CUjit_option*, void**); CUresult (*_cuLinkAddData)( - CUlinkState, CUjitInputType, void*, size_t, char const*, unsigned int, CUjit_option*, void**); - CUresult (*_cuLaunchCooperativeKernel)(CUfunction, unsigned int, unsigned int, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, CUstream, void**); + CUlinkState, CUjitInputType, void*, size_t, char const*, uint32_t, CUjit_option*, void**); + CUresult (*_cuLaunchCooperativeKernel)( + CUfunction, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, CUstream, void**); CUresult (*_cuLaunchKernel)(CUfunction f, uint32_t gridDimX, uint32_t gridDimY, uint32_t gridDimZ, uint32_t blockDimX, uint32_t blockDimY, uint32_t blockDimZ, uint32_t sharedMemBytes, CUstream hStream, void** kernelParams, void** extra); }; -inline void cuErrCheck_(CUresult stat, CUDADriverWrapper const& wrap, char const* file, int line) +inline void cuErrCheck_(CUresult stat, CUDADriverWrapper const& wrap, char const* file, int32_t line) { if (stat != CUDA_SUCCESS) { diff --git a/plugin/common/kernels/decodeBbox3DKernels.cu b/plugin/common/kernels/decodeBbox3DKernels.cu index 25d934e7..30ead81c 100644 --- a/plugin/common/kernels/decodeBbox3DKernels.cu +++ b/plugin/common/kernels/decodeBbox3DKernels.cu @@ -22,8 +22,9 @@ namespace nvinfer1 { namespace plugin { -#define checkCudaErrors(status) \ +#define checkCudaErrors(status_) \ { \ + auto const status = status_; \ if (status != 0) \ { \ std::cout << "Cuda failure: " << cudaGetErrorString(status) \ @@ -42,7 +43,7 @@ namespace plugin __device__ float sigmoid(const float x) { return 1.0f / (1.0f + expf(-x)); } __global__ void postprocess_kernal(const float *cls_input, - float *box_input, + float const* box_input, const float *dir_cls_input, float *anchors, float *anchors_bottom_height, @@ -96,7 +97,7 @@ __global__ void postprocess_kernal(const float *cls_input, float *anchor_ptr = anchors + ith_anchor * 4; float z_offset = anchor_ptr[2] / 2 + anchors_bottom_height[ith_anchor / 2]; float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]}; - float *box_encodings = box_input + box_offset; + float const* box_encodings = box_input + box_offset; float xa = anchor[0]; float ya = anchor[1]; float za = anchor[2]; @@ -136,7 +137,7 @@ __global__ void postprocess_kernal(const float *cls_input, void decodeBbox3DLaunch( const int batch_size, const float *cls_input, - float *box_input, + const float *box_input, const float *dir_cls_input, float *anchors, float *anchors_bottom_height, diff --git a/plugin/common/kernels/kernel.h b/plugin/common/kernels/kernel.h index 94bbab5b..7777402c 100644 --- a/plugin/common/kernels/kernel.h +++ b/plugin/common/kernels/kernel.h @@ -241,12 +241,12 @@ int32_t proposalInference_gpu(cudaStream_t stream, void const* rpn_prob, void co size_t _get_workspace_size( int32_t N, int32_t anc_size_num, int32_t anc_ratio_num, int32_t H, int32_t W, int32_t nmsMaxOut); -void decodeBbox3DLaunch(int32_t const batch_size, float const* cls_input, float* box_input, float const* dir_cls_input, - float* anchors, float* anchors_bottom_height, float* bndbox_output, int32_t* object_counter, - float const min_x_range, float const max_x_range, float const min_y_range, float const max_y_range, - int32_t const feature_x_size, int32_t const feature_y_size, int32_t const num_anchors, int32_t const num_classes, - int32_t const num_box_values, float const score_thresh, float const dir_offset, float const 
dir_limit_offset, - int32_t const num_dir_bins, cudaStream_t stream = 0); +void decodeBbox3DLaunch(int32_t const batch_size, float const* cls_input, float const* box_input, + float const* dir_cls_input, float* anchors, float* anchors_bottom_height, float* bndbox_output, + int32_t* object_counter, float const min_x_range, float const max_x_range, float const min_y_range, + float const max_y_range, int32_t const feature_x_size, int32_t const feature_y_size, int32_t const num_anchors, + int32_t const num_classes, int32_t const num_box_values, float const score_thresh, float const dir_offset, + float const dir_limit_offset, int32_t const num_dir_bins, cudaStream_t stream = 0); template int32_t pillarScatterKernelLaunch(int32_t batch_size, int32_t max_pillar_num, int32_t num_features, diff --git a/plugin/common/kernels/maskRCNNKernels.h b/plugin/common/kernels/maskRCNNKernels.h index 5b706150..9763816f 100644 --- a/plugin/common/kernels/maskRCNNKernels.h +++ b/plugin/common/kernels/maskRCNNKernels.h @@ -38,7 +38,7 @@ inline size_t nAlignDown(size_t x, size_t align) inline size_t dimVolume(const nvinfer1::Dims& dims) { size_t volume = 1; - for (int i = 0; i < dims.nbDims; ++i) + for (int32_t i = 0; i < dims.nbDims; ++i) volume *= dims.d[i]; return volume; @@ -63,14 +63,14 @@ inline size_t typeSize(const nvinfer1::DataType type) struct RefineNMSParameters { - int backgroundLabelId, numClasses, keepTopK; + int32_t backgroundLabelId, numClasses, keepTopK; float scoreThreshold, iouThreshold; }; struct RefineDetectionWorkSpace { - RefineDetectionWorkSpace( - const int batchSize, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType type); + RefineDetectionWorkSpace(const int32_t batchSize, const int32_t sampleCount, const RefineNMSParameters& param, + const nvinfer1::DataType type); RefineDetectionWorkSpace() = default; @@ -98,8 +98,8 @@ struct RefineDetectionWorkSpace struct ProposalWorkSpace { - ProposalWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, const RefineNMSParameters& param, - const nvinfer1::DataType type); + ProposalWorkSpace(const int32_t batchSize, const int32_t inputCnt, const int32_t sampleCount, + const RefineNMSParameters& param, const nvinfer1::DataType type); ProposalWorkSpace() = default; @@ -134,7 +134,7 @@ struct ProposalWorkSpace struct MultilevelProposeROIWorkSpace { - MultilevelProposeROIWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, + MultilevelProposeROIWorkSpace(const int32_t batchSize, const int32_t inputCnt, const int32_t sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType type); MultilevelProposeROIWorkSpace() = default; @@ -168,7 +168,8 @@ struct MultilevelProposeROIWorkSpace struct ConcatTopKWorkSpace { - ConcatTopKWorkSpace(const int batchSize, const int concatCnt, const int topK, const nvinfer1::DataType inType); + ConcatTopKWorkSpace( + const int32_t batchSize, const int32_t concatCnt, const int32_t topK, const nvinfer1::DataType inType); ConcatTopKWorkSpace() = default; @@ -185,18 +186,18 @@ struct ConcatTopKWorkSpace size_t totalSize = 0; }; -cudaError_t RefineBatchClassNMS(cudaStream_t stream, int N, int samples, nvinfer1::DataType dtype, +cudaError_t RefineBatchClassNMS(cudaStream_t stream, int32_t N, int32_t samples, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections); -cudaError_t 
DetectionPostProcess(cudaStream_t stream, int N, int samples, const float* regWeight, +cudaError_t DetectionPostProcess(cudaStream_t stream, int32_t N, int32_t samples, const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections); -cudaError_t proposalRefineBatchClassNMS(cudaStream_t stream, int N, - int inputCnt, // candidate anchors - int samples, // preNMS_topK +cudaError_t proposalRefineBatchClassNMS(cudaStream_t stream, int32_t N, + int32_t inputCnt, // candidate anchors + int32_t samples, // preNMS_topK nvinfer1::DataType dtype, const RefineNMSParameters& param, const ProposalWorkSpace& proposalOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inAnchors, void* outProposals); @@ -205,9 +206,9 @@ cudaError_t proposalRefineBatchClassNMS(cudaStream_t stream, int N, // inDelta: [N, anchorsCnt, 4] // outScores: [N, topK, 1] // outBbox: [N, topK, 4] -cudaError_t MultilevelPropose(cudaStream_t stream, int N, - int inputCnt, // candidate anchors number among feature map - int samples, // pre nms cnt +cudaError_t MultilevelPropose(cudaStream_t stream, int32_t N, + int32_t inputCnt, // candidate anchors number among feature map + int32_t samples, // pre nms cnt const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const MultilevelProposeROIWorkSpace& proposalOffset, void* workspace, const void* inScore, const void* inDelta, void* inCountValid, const void* inAnchors, void* outScores, @@ -216,35 +217,33 @@ cudaError_t MultilevelPropose(cudaStream_t stream, int N, // inScores: [N, topK, 1] * featureCnt // inBboxes: [N, topK, 4] * featureCnt // outProposals: [N, topK, 4] -cudaError_t ConcatTopK(cudaStream_t stream, int N, int featureCnt, int topK, nvinfer1::DataType dtype, void* workspace, - const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals); +cudaError_t ConcatTopK(cudaStream_t stream, int32_t N, int32_t featureCnt, int32_t topK, nvinfer1::DataType dtype, + void* workspace, const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals); -cudaError_t DecodeBBoxes(cudaStream_t stream, int N, - int samples, // number of anchors per image +cudaError_t DecodeBBoxes(cudaStream_t stream, int32_t N, + int32_t samples, // number of anchors per image const float* regWeight, const float inputHeight, const float inputWidth, const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)] - void* outputBbox, - nvinfer1::DataType dtype - ); + void* outputBbox, nvinfer1::DataType dtype); -cudaError_t ApplyDelta2Bboxes(cudaStream_t stream, int N, - int samples, // number of anchors per image +cudaError_t ApplyDelta2Bboxes(cudaStream_t stream, int32_t N, + int32_t samples, // number of anchors per image const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)] void* outputBbox); struct xy_t { - int y; - int x; + int32_t y; + int32_t x; xy_t() : y(0) , x(0) { } - xy_t(int y_, int x_) + xy_t(int32_t y_, int32_t x_) : y(y_) , x(x_) { @@ -256,16 +255,17 @@ cudaError_t roiAlign(cudaStream_t const stream, int32_t const batchSize, xy_t co bool const absCoords, bool const 
swapCoords, bool const plusOneCoords, int32_t const samplingRatio, void const* rois, void const* const layers[], xy_t const* layerDims, void* pooled, xy_t const poolDims); -cudaError_t roiAlignHalfCenter(cudaStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold, +cudaError_t roiAlignHalfCenter(cudaStream_t stream, int32_t batchSize, int32_t featureCount, int32_t roiCount, + float firstThreshold, - int inputHeight, int inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims, + int32_t inputHeight, int32_t inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims, void* pooled, const xy_t poolDims, const nvinfer1::DataType dtype); // RESIZE NEAREST -void resizeNearest(dim3 grid, dim3 block, cudaStream_t stream, int nbatch, float scale, int2 osize, float const* idata, - int istride, int ibatchstride, float* odata, int ostride, int obatchstride); +void resizeNearest(dim3 grid, dim3 block, cudaStream_t stream, int32_t nbatch, float scale, int2 osize, + float const* idata, int32_t istride, int32_t ibatchstride, float* odata, int32_t ostride, int32_t obatchstride); // SPECIAL SLICE -void specialSlice(cudaStream_t stream, int batch_size, int boxes_cnt, const void* idata, void* odata); +void specialSlice(cudaStream_t stream, int32_t batch_size, int32_t boxes_cnt, const void* idata, void* odata); #endif // TRT_MASKRCNN_UTILS_H diff --git a/plugin/common/kernels/reducedMathPlugin.h b/plugin/common/kernels/reducedMathPlugin.h index f52ea72a..777a5e51 100644 --- a/plugin/common/kernels/reducedMathPlugin.h +++ b/plugin/common/kernels/reducedMathPlugin.h @@ -32,7 +32,7 @@ namespace plugin namespace detail { -void findDivisor(int denom, unsigned int& mul_coeff, unsigned int& shift_coeff); +void findDivisor(int32_t denom, uint32_t& mul_coeff, uint32_t& shift_coeff); __host__ __device__ __forceinline__ uint32_t umulhi(uint32_t x, uint32_t y) { @@ -46,51 +46,49 @@ __host__ __device__ __forceinline__ uint32_t umulhi(uint32_t x, uint32_t y) // This is a weird implementation that returns div_up(0,1)=0 but // div_up(0,2)=1 (wrong) -- just do not use it with a=0. -__host__ __device__ inline int div_up(int a, int b) +__host__ __device__ inline int32_t div_up(int32_t a, int32_t b) { return (a - 1) / b + 1; } -} //end namespace detail +} // end namespace detail class ReducedDivisor { public: ReducedDivisor() {} - __host__ __forceinline__ - ReducedDivisor(int _y) + __host__ __forceinline__ ReducedDivisor(int32_t _y) : y(_y) { detail::findDivisor(y, mul_coeff, shift_coeff); } - __host__ __device__ __forceinline__ - ReducedDivisor(unsigned _mul_coeff, unsigned _shift_coeff, int _y) + __host__ __device__ __forceinline__ ReducedDivisor(uint32_t _mul_coeff, uint32_t _shift_coeff, int32_t _y) : mul_coeff(_mul_coeff) , shift_coeff(_shift_coeff) , y(_y) { } - __host__ __device__ __forceinline__ int div(int x) const + __host__ __device__ __forceinline__ int32_t div(int32_t x) const { // if dividing by 1, then findDivisor wouldn't have worked because // mul_coeff would have had to be 2^32, which can't be represented, // so we have to special case that one. return (y != 1) ? 
detail::umulhi((uint32_t) x, mul_coeff) >> shift_coeff : x; } - __host__ __device__ __forceinline__ int mod(int x) const + __host__ __device__ __forceinline__ int32_t mod(int32_t x) const { return x - (div(x) * y); } - __host__ __device__ __forceinline__ void divmod(int x, int& q, int& mod) const + __host__ __device__ __forceinline__ void divmod(int32_t x, int32_t& q, int32_t& mod) const { q = div(x); mod = x - (q * y); } - __host__ __device__ __forceinline__ int get() const + __host__ __device__ __forceinline__ int32_t get() const { return y; } - inline __host__ void get_mul_shift(unsigned& mul, unsigned& shift) + inline __host__ void get_mul_shift(uint32_t& mul, uint32_t& shift) { mul = mul_coeff; shift = shift_coeff; @@ -99,7 +97,7 @@ class ReducedDivisor protected: uint32_t mul_coeff{}; uint32_t shift_coeff{}; - int y{}; + int32_t y{}; }; } // namespace plugin diff --git a/plugin/common/mrcnn_config.h b/plugin/common/mrcnn_config.h index 93c3cccd..5b3673ca 100644 --- a/plugin/common/mrcnn_config.h +++ b/plugin/common/mrcnn_config.h @@ -26,8 +26,8 @@ namespace MaskRCNNConfig static const nvinfer1::Dims3 IMAGE_SHAPE{3, 1024, 1024}; // Pooled ROIs -static int const POOL_SIZE = 7; -static int const MASK_POOL_SIZE = 14; +static int32_t const POOL_SIZE = 7; +static int32_t const MASK_POOL_SIZE = 14; // Threshold to determine the mask area out of final convolution output static float const MASK_THRESHOLD = 0.5F; @@ -37,7 +37,7 @@ static float const RPN_BBOX_STD_DEV[] = {0.1F, 0.1F, 0.2F, 0.2F}; static float const BBOX_STD_DEV[] = {0.1F, 0.1F, 0.2F, 0.2F}; // Max number of final detections -static int const DETECTION_MAX_INSTANCES = 100; +static int32_t const DETECTION_MAX_INSTANCES = 100; // Minimum probability value to accept a detected instance // ROIs below this threshold are skipped @@ -51,13 +51,13 @@ static float const DETECTION_NMS_THRESHOLD = 0.3F; static const std::vector BACKBONE_STRIDES = {4.F, 8.F, 16.F, 32.F, 64.F}; // Size of the fully-connected layers in the classification graph -static int const FPN_CLASSIF_FC_LAYERS_SIZE = 1024; +static int32_t const FPN_CLASSIF_FC_LAYERS_SIZE = 1024; // Size of the top-down layers used to build the feature pyramid -static int const TOP_DOWN_PYRAMID_SIZE = 256; +static int32_t const TOP_DOWN_PYRAMID_SIZE = 256; // Number of classification classes (including background) -static int const NUM_CLASSES = 1 + 80; // COCO has 80 classes +static int32_t const NUM_CLASSES = 1 + 80; // COCO has 80 classes // Length of square anchor side in pixels static const std::vector RPN_ANCHOR_SCALES = {32.F, 64.F, 128.F, 256.F, 512.F}; @@ -69,18 +69,18 @@ static float const RPN_ANCHOR_RATIOS[] = {0.5F, 1.F, 2.F}; // Anchor stride // If 1 then anchors are created for each cell in the backbone feature map. // If 2, then anchors are created for every other cell, and so on. -static int const RPN_ANCHOR_STRIDE = 1; +static int32_t const RPN_ANCHOR_STRIDE = 1; // Although Python impementation uses 6000, // TRT fails if this number larger than kMAX_TOPK_K defined in engine/checkMacros.h -static int const MAX_PRE_NMS_RESULTS = 1024; // 3840; +static int32_t const MAX_PRE_NMS_RESULTS = 1024; // 3840; // Non-max suppression threshold to filter RPN proposals. // You can increase this during training to generate more propsals. 
static float const RPN_NMS_THRESHOLD = 0.7F; // ROIs kept after non-maximum suppression (training and inference) -static int const POST_NMS_ROIS_INFERENCE = 1000; +static int32_t const POST_NMS_ROIS_INFERENCE = 1000; // COCO Class names static const std::vector CLASS_NAMES = { diff --git a/plugin/common/nmsHelper.cpp b/plugin/common/nmsHelper.cpp index 2104b098..8b0cc9ca 100644 --- a/plugin/common/nmsHelper.cpp +++ b/plugin/common/nmsHelper.cpp @@ -24,7 +24,7 @@ namespace nvinfer1 namespace plugin { -size_t detectionForwardBBoxDataSize(int N, int C1, DataType DT_BBOX) +size_t detectionForwardBBoxDataSize(int32_t N, int32_t C1, DataType DT_BBOX) { if (DT_BBOX == DataType::kFLOAT) { @@ -39,7 +39,7 @@ size_t detectionForwardBBoxDataSize(int N, int C1, DataType DT_BBOX) return (size_t) -1; } -size_t detectionForwardBBoxPermuteSize(bool shareLocation, int N, int C1, DataType DT_BBOX) +size_t detectionForwardBBoxPermuteSize(bool shareLocation, int32_t N, int32_t C1, DataType DT_BBOX) { if (DT_BBOX == DataType::kFLOAT) { @@ -54,15 +54,15 @@ size_t detectionForwardBBoxPermuteSize(bool shareLocation, int N, int C1, DataTy return (size_t) -1; } -size_t detectionForwardPreNMSSize(int N, int C2) +size_t detectionForwardPreNMSSize(int32_t N, int32_t C2) { - PLUGIN_ASSERT(sizeof(float) == sizeof(int)); + PLUGIN_ASSERT(sizeof(float) == sizeof(int32_t)); return N * C2 * sizeof(float); } -size_t detectionForwardPostNMSSize(int N, int numClasses, int topK) +size_t detectionForwardPostNMSSize(int32_t N, int32_t numClasses, int32_t topK) { - PLUGIN_ASSERT(sizeof(float) == sizeof(int)); + PLUGIN_ASSERT(sizeof(float) == sizeof(int32_t)); return N * numClasses * topK * sizeof(float); } } // namespace plugin diff --git a/plugin/common/nmsUtils.h b/plugin/common/nmsUtils.h index 6270f351..28a4aa7e 100644 --- a/plugin/common/nmsUtils.h +++ b/plugin/common/nmsUtils.h @@ -23,8 +23,8 @@ namespace nvinfer1 { namespace plugin { -size_t detectionInferenceWorkspaceSize(bool shareLocation, int N, int C1, int C2, int numClasses, int numPredsPerClass, - int topK, nvinfer1::DataType DT_BBOX, nvinfer1::DataType DT_SCORE); +size_t detectionInferenceWorkspaceSize(bool shareLocation, int32_t N, int32_t C1, int32_t C2, int32_t numClasses, + int32_t numPredsPerClass, int32_t topK, nvinfer1::DataType DT_BBOX, nvinfer1::DataType DT_SCORE); } // namespace plugin } // namespace nvinfer1 #endif diff --git a/plugin/common/plugin.h b/plugin/common/plugin.h index 3d9d7078..a043339d 100644 --- a/plugin/common/plugin.h +++ b/plugin/common/plugin.h @@ -26,6 +26,7 @@ #include #include +// Enumerator for status typedef enum { STATUS_SUCCESS = 0, diff --git a/plugin/common/reducedMathPlugin.cpp b/plugin/common/reducedMathPlugin.cpp index 36d24458..4e33680a 100644 --- a/plugin/common/reducedMathPlugin.cpp +++ b/plugin/common/reducedMathPlugin.cpp @@ -23,9 +23,9 @@ namespace detail { // Count leading zeros - start from most significant bit. 
-int clz(int x) +int32_t clz(int32_t x) { - for (int i = 31; i >= 0; --i) + for (int32_t i = 31; i >= 0; --i) { if ((1U << i) & x) { @@ -37,9 +37,9 @@ int clz(int x) #define CUDNN_IS_POW_2(x) (0 == ((x) & ((x) -1))) -int find_log_2(int x, bool round_up = false) +int32_t find_log_2(int32_t x, bool round_up = false) { - int a = 31 - clz(x); + int32_t a = 31 - clz(x); if (round_up) { a += !CUDNN_IS_POW_2(x); @@ -47,7 +47,7 @@ int find_log_2(int x, bool round_up = false) return a; } -void findDivisor(int denom, unsigned int& mul_coeff, unsigned int& shift_coeff) +void findDivisor(int32_t denom, uint32_t& mul_coeff, uint32_t& shift_coeff) { if (denom == 0) { @@ -56,7 +56,7 @@ void findDivisor(int denom, unsigned int& mul_coeff, unsigned int& shift_coeff) if (denom == 1) { // if dividing by 1, reduced math doesn't work because mul_coeff would - // need to be 2^32, which doesn't fit into unsigned int. the div() + // need to be 2^32, which doesn't fit into uint32_t. the div() // routine handles this special case separately. mul_coeff = 0; shift_coeff = 0; diff --git a/plugin/common/templates.h b/plugin/common/templates.h index 3629b5f0..298bb8c2 100644 --- a/plugin/common/templates.h +++ b/plugin/common/templates.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/common/vfcCommon.cpp b/plugin/common/vfcCommon.cpp index 17c1283b..11375350 100644 --- a/plugin/common/vfcCommon.cpp +++ b/plugin/common/vfcCommon.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/common/vfcCommon.h b/plugin/common/vfcCommon.h index 2791017d..a2015177 100644 --- a/plugin/common/vfcCommon.h +++ b/plugin/common/vfcCommon.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/coordConvACPlugin/coordConvACPlugin.cpp b/plugin/coordConvACPlugin/coordConvACPlugin.cpp index d958a265..443ba6d5 100644 --- a/plugin/coordConvACPlugin/coordConvACPlugin.cpp +++ b/plugin/coordConvACPlugin/coordConvACPlugin.cpp @@ -35,7 +35,8 @@ std::vector CoordConvACPluginCreator::mPluginAttributes; CoordConvACPlugin::CoordConvACPlugin() {} -CoordConvACPlugin::CoordConvACPlugin(nvinfer1::DataType iType, int iC, int iH, int iW, int oC, int oH, int oW) +CoordConvACPlugin::CoordConvACPlugin( + nvinfer1::DataType iType, int32_t iC, int32_t iH, int32_t iW, int32_t oC, int32_t oH, int32_t oW) : iType(iType) , iC(iC) , iH(iH) @@ -63,19 +64,19 @@ void CoordConvACPlugin::deserialize(uint8_t const* data, size_t length) PLUGIN_VALIDATE(d == data + length); } -int CoordConvACPlugin::getNbOutputs() const noexcept +int32_t CoordConvACPlugin::getNbOutputs() const noexcept { return 1; } -int CoordConvACPlugin::initialize() noexcept +int32_t CoordConvACPlugin::initialize() noexcept { return STATUS_SUCCESS; } void CoordConvACPlugin::terminate() noexcept {} -Dims CoordConvACPlugin::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims CoordConvACPlugin::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(index == 0); PLUGIN_ASSERT(nbInputDims == 1); @@ -92,7 +93,7 @@ Dims CoordConvACPlugin::getOutputDimensions(int index, Dims const* inputs, int n return dimsOutput; } -size_t CoordConvACPlugin::getWorkspaceSize(int maxBatchSize) const noexcept +size_t CoordConvACPlugin::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return 0; } @@ -100,7 +101,7 @@ size_t CoordConvACPlugin::getWorkspaceSize(int maxBatchSize) const noexcept size_t CoordConvACPlugin::getSerializationSize() const noexcept { // iType, iC, iH, iW, oC, oH, oW - return sizeof(nvinfer1::DataType) + sizeof(int) * 6; + return sizeof(nvinfer1::DataType) + sizeof(int32_t) * 6; } void CoordConvACPlugin::serialize(void* buffer) const noexcept @@ -116,9 +117,9 @@ void CoordConvACPlugin::serialize(void* buffer) const noexcept PLUGIN_ASSERT(d == a + getSerializationSize()); } -void CoordConvACPlugin::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, - DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, nvinfer1::PluginFormat format, int maxBatchSize) noexcept +void CoordConvACPlugin::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, nvinfer1::PluginFormat format, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(nbInputs == 1); PLUGIN_ASSERT(nbOutputs == 1); @@ -180,18 +181,18 @@ char const* CoordConvACPlugin::getPluginNamespace() const noexcept } nvinfer1::DataType CoordConvACPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { return inputTypes[0]; } bool CoordConvACPlugin::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } -bool 
CoordConvACPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool CoordConvACPlugin::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } diff --git a/plugin/coordConvACPlugin/coordConvACPlugin.h b/plugin/coordConvACPlugin/coordConvACPlugin.h index 3b9905b8..a710ba44 100644 --- a/plugin/coordConvACPlugin/coordConvACPlugin.h +++ b/plugin/coordConvACPlugin/coordConvACPlugin.h @@ -35,32 +35,32 @@ class CoordConvACPlugin : public IPluginV2Ext public: CoordConvACPlugin(); - CoordConvACPlugin(DataType iType, int iC, int iH, int iW, int oC, int oH, int oW); + CoordConvACPlugin(DataType iType, int32_t iC, int32_t iH, int32_t iW, int32_t oC, int32_t oH, int32_t oW); CoordConvACPlugin(void const* data, size_t length); ~CoordConvACPlugin() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; bool supportsFormat(DataType type, PluginFormat format) const noexcept override; @@ -73,16 +73,16 @@ class CoordConvACPlugin : public IPluginV2Ext IPluginV2Ext* clone() const noexcept override; nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputType, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* inputType, int32_t nbInputs) const noexcept override; void setPluginNamespace(char const* pluginNamespace) noexcept override; char const* getPluginNamespace() const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; private: void deserialize(uint8_t const* data, size_t length); diff --git a/plugin/decodeBbox3DPlugin/decodeBbox3D.cpp b/plugin/decodeBbox3DPlugin/decodeBbox3D.cpp index 01cfb22c..c24a4d20 100644 --- a/plugin/decodeBbox3DPlugin/decodeBbox3D.cpp +++ b/plugin/decodeBbox3DPlugin/decodeBbox3D.cpp @@ -16,117 +16,111 @@ */ #include "decodeBbox3D.h" +#include "common/bboxUtils.h" +#include "common/checkMacrosPlugin.h" +#include 
"common/kernels/kernel.h" #include "common/templates.h" -#include -#include namespace nvinfer1 { namespace plugin { -#define checkCudaErrors(status) \ - { \ - if ((status) != 0) \ - { \ - std::cout << "Cuda failure: " << cudaGetErrorString(status) << " at line " << __LINE__ << " in file " \ - << __FILE__ << " error status: " << (status) << std::endl; \ - abort(); \ - } \ - } using nvinfer1::plugin::DecodeBbox3DPlugin; using nvinfer1::plugin::DecodeBbox3DPluginCreator; +namespace +{ static char const* const kPLUGIN_VERSION{"1"}; static char const* const kPLUGIN_NAME{"DecodeBbox3DPlugin"}; +} // namespace // Static class fields initialization PluginFieldCollection DecodeBbox3DPluginCreator::mFC{}; std::vector DecodeBbox3DPluginCreator::mPluginAttributes; -DecodeBbox3DPlugin::DecodeBbox3DPlugin(float x_min, float x_max, float y_min, float y_max, float z_min, float z_max, - int32_t num_dir_bins, float dir_offset, float dir_limit_offset, std::vector const& anchor_bottom_height, - std::vector const& anchors, float score_thresh) - : min_x_range_(x_min) - , max_x_range_(x_max) - , min_y_range_(y_min) - , max_y_range_(y_max) - , min_z_range_(z_min) - , max_z_range_(z_max) - , num_dir_bins_(num_dir_bins) - , dir_offset_(dir_offset) - , dir_limit_offset_(dir_limit_offset) - , score_thresh_(score_thresh) +DecodeBbox3DPlugin::DecodeBbox3DPlugin(float xMin, float xMax, float yMin, float yMax, float zMin, float zMax, + int32_t numDirBins, float dirOffset, float dirLimitOffset, std::vector const& anchorBottomHeight, + std::vector const& anchors, float scoreThreshold) + : mMinXRange(xMin) + , mMaxXRange(xMax) + , mMinYRange(yMin) + , mMaxYRange(yMax) + , mMinZRange(zMin) + , mMaxZRange(zMax) + , mNumDirBins(numDirBins) + , mDirOffset(dirOffset) + , mDirLimitOffset(dirLimitOffset) + , mScoreThreashold(scoreThreshold) { - anchor_bottom_height_.clear(); - for (size_t i = 0; i < anchor_bottom_height.size(); i++) - anchor_bottom_height_.push_back(anchor_bottom_height[i]); - anchors_.clear(); - for (size_t i = 0; i < anchors.size(); i++) - anchors_.push_back(anchors[i]); - num_classes_ = int(anchor_bottom_height_.size()); - PLUGIN_VALIDATE(num_classes_ > 0); - PLUGIN_VALIDATE(static_cast(num_classes_) * 2 * 4 == anchors_.size()); + mAnchorBottomHeight = anchorBottomHeight; + mAnchors = anchors; + mNumClasses = static_cast(mAnchorBottomHeight.size()); + PLUGIN_VALIDATE(static_cast(mNumClasses) * 2 * 4 == mAnchors.size()); } -DecodeBbox3DPlugin::DecodeBbox3DPlugin(float x_min, float x_max, float y_min, float y_max, float z_min, float z_max, - int num_dir_bins, float dir_offset, float dir_limit_offset, std::vector const& anchor_bottom_height, - std::vector const& anchors, float score_thresh, int feature_h, int feature_w) - : min_x_range_(x_min) - , max_x_range_(x_max) - , min_y_range_(y_min) - , max_y_range_(y_max) - , min_z_range_(z_min) - , max_z_range_(z_max) - , num_dir_bins_(num_dir_bins) - , dir_offset_(dir_offset) - , dir_limit_offset_(dir_limit_offset) - , score_thresh_(score_thresh) - , feature_h_(feature_h) - , feature_w_(feature_w) +DecodeBbox3DPlugin::DecodeBbox3DPlugin(float xMin, float xMax, float yMin, float yMax, float zMin, float zMax, + int32_t numDirBins, float dirOffset, float dirLimitOffset, std::vector const& anchorBottomHeight, + std::vector const& anchors, float scoreThreshold, int32_t feature_h, int32_t feature_w) + : mMinXRange(xMin) + , mMaxXRange(xMax) + , mMinYRange(yMin) + , mMaxYRange(yMax) + , mMinZRange(zMin) + , mMaxZRange(zMax) + , mNumDirBins(numDirBins) + , mDirOffset(dirOffset) 
+ , mDirLimitOffset(dirLimitOffset) + , mScoreThreashold(scoreThreshold) + , mFeatureH(feature_h) + , mFeatureW(feature_w) { - anchor_bottom_height_.clear(); - for (size_t i = 0; i < anchor_bottom_height.size(); i++) - anchor_bottom_height_.push_back(anchor_bottom_height[i]); - anchors_.clear(); - for (size_t i = 0; i < anchors.size(); i++) - anchors_.push_back(anchors[i]); - num_classes_ = int(anchor_bottom_height_.size()); - PLUGIN_VALIDATE(num_classes_ > 0); - PLUGIN_VALIDATE(static_cast(num_classes_) * 2 * 4 == anchors_.size()); + mAnchorBottomHeight = anchorBottomHeight; + mAnchors = anchors; + mNumClasses = static_cast(mAnchorBottomHeight.size()); + PLUGIN_VALIDATE(static_cast(mNumClasses) * 2 * 4 == mAnchors.size()); } DecodeBbox3DPlugin::DecodeBbox3DPlugin(void const* data, size_t length) { - char const* d = reinterpret_cast(data); - min_x_range_ = readFromBuffer(d); - max_x_range_ = readFromBuffer(d); - min_y_range_ = readFromBuffer(d); - max_y_range_ = readFromBuffer(d); - min_z_range_ = readFromBuffer(d); - max_z_range_ = readFromBuffer(d); - num_dir_bins_ = readFromBuffer(d); - dir_offset_ = readFromBuffer(d); - dir_limit_offset_ = readFromBuffer(d); - score_thresh_ = readFromBuffer(d); - num_classes_ = readFromBuffer(d); - feature_h_ = readFromBuffer(d); - feature_w_ = readFromBuffer(d); - anchor_bottom_height_.clear(); - anchors_.clear(); - for (int i = 0; i < num_classes_; i++) - anchor_bottom_height_.push_back(readFromBuffer(d)); - for (int i = 0; i < num_classes_ * 2 * 4; i++) - anchors_.push_back(readFromBuffer(d)); + PLUGIN_VALIDATE(data != nullptr); + auto const* d = reinterpret_cast(data); + mMinXRange = readFromBuffer(d); + mMaxXRange = readFromBuffer(d); + mMinYRange = readFromBuffer(d); + mMaxYRange = readFromBuffer(d); + mMinZRange = readFromBuffer(d); + mMaxZRange = readFromBuffer(d); + mNumDirBins = readFromBuffer(d); + mDirOffset = readFromBuffer(d); + mDirLimitOffset = readFromBuffer(d); + mScoreThreashold = readFromBuffer(d); + mNumClasses = readFromBuffer(d); + mFeatureH = readFromBuffer(d); + mFeatureW = readFromBuffer(d); + + mAnchorBottomHeight.resize(mNumClasses); + for (int32_t i = 0; i < mNumClasses; i++) + { + mAnchorBottomHeight[i] = readFromBuffer(d); + } + + mAnchors.resize(mNumClasses * 2 * 4); + for (int32_t i = 0; i < mNumClasses * 2 * 4; i++) + { + mAnchors[i] = readFromBuffer(d); + } + + PLUGIN_VALIDATE(d == reinterpret_cast(data) + length); } nvinfer1::IPluginV2DynamicExt* DecodeBbox3DPlugin::clone() const noexcept { try { - auto* plugin = new DecodeBbox3DPlugin(min_x_range_, max_x_range_, min_y_range_, max_y_range_, min_z_range_, - max_z_range_, num_dir_bins_, dir_offset_, dir_limit_offset_, anchor_bottom_height_, anchors_, score_thresh_, - feature_h_, feature_w_); + auto* plugin = new DecodeBbox3DPlugin(mMinXRange, mMaxXRange, mMinYRange, mMaxYRange, mMinZRange, mMaxZRange, + mNumDirBins, mDirOffset, mDirLimitOffset, mAnchorBottomHeight, mAnchors, mScoreThreashold, mFeatureH, + mFeatureW); plugin->setPluginNamespace(mNamespace.c_str()); return plugin; } @@ -137,36 +131,56 @@ nvinfer1::IPluginV2DynamicExt* DecodeBbox3DPlugin::clone() const noexcept return nullptr; } -nvinfer1::DimsExprs DecodeBbox3DPlugin::getOutputDimensions( - int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept +nvinfer1::DimsExprs DecodeBbox3DPlugin::getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, + int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { - 
PLUGIN_ASSERT(this->getNbOutputs() == 2); - PLUGIN_ASSERT(outputIndex >= 0 && outputIndex < this->getNbOutputs()); - auto feature_h = inputs[0].d[1]; - auto feature_w = inputs[0].d[2]; - auto batch_size = inputs[0].d[0]; - if (outputIndex == 0) + try { - nvinfer1::DimsExprs dim0{}; - dim0.nbDims = 3; - dim0.d[0] = batch_size; - dim0.d[1] = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, feature_h[0], - exprBuilder.operation( - nvinfer1::DimensionOperation::kPROD, feature_w[0], exprBuilder.constant(num_classes_ * 2)[0])[0]); - dim0.d[2] = exprBuilder.constant(9); - return dim0; + PLUGIN_VALIDATE(getNbOutputs() == 2); + PLUGIN_VALIDATE(outputIndex >= 0 && outputIndex < getNbOutputs()); + PLUGIN_VALIDATE(inputs != nullptr); + auto const& featureH = inputs[0].d[1]; + auto const& featureW = inputs[0].d[2]; + auto const& batchSize = inputs[0].d[0]; + if (outputIndex == 0) + { + nvinfer1::DimsExprs dim0{}; + dim0.nbDims = 3; + dim0.d[0] = batchSize; + dim0.d[1] = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, featureH[0], + exprBuilder.operation( + nvinfer1::DimensionOperation::kPROD, featureW[0], exprBuilder.constant(mNumClasses * 2)[0])[0]); + dim0.d[2] = exprBuilder.constant(9); + return dim0; + } + nvinfer1::DimsExprs dim1{}; + dim1.nbDims = 1; + dim1.d[0] = batchSize; + return dim1; } - nvinfer1::DimsExprs dim1{}; - dim1.nbDims = 1; - dim1.d[0] = batch_size; - return dim1; + catch (std::exception const& e) + { + caughtError(e); + } + return nvinfer1::DimsExprs{}; } bool DecodeBbox3DPlugin::supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { - PLUGIN_ASSERT(nbInputs == 3); - PLUGIN_ASSERT(nbOutputs == 2); + try + { + PLUGIN_VALIDATE(nbInputs == 3); + PLUGIN_VALIDATE(nbOutputs == 2); + PLUGIN_VALIDATE(inOut != nullptr); + PLUGIN_VALIDATE((pos >= 0) && (pos < nbInputs + nbOutputs)); + } + catch (std::exception const& e) + { + caughtError(e); + return false; + } + PluginTensorDesc const& in = inOut[pos]; if (pos == 0) // cls_preds { @@ -191,58 +205,90 @@ bool DecodeBbox3DPlugin::supportsFormatCombination( return false; } -void DecodeBbox3DPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept +void DecodeBbox3DPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { - feature_h_ = in[0].desc.dims.d[1]; - feature_w_ = in[0].desc.dims.d[2]; + try + { + PLUGIN_VALIDATE(in != nullptr); + mFeatureH = in[0].desc.dims.d[1]; + mFeatureW = in[0].desc.dims.d[2]; + } + catch (std::exception const& e) + { + caughtError(e); + } } -size_t DecodeBbox3DPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept +size_t DecodeBbox3DPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { - size_t anchors_size = num_classes_ * 2 * 4 * sizeof(float); - size_t anchor_bottom_height_size = num_classes_ * sizeof(float); + size_t mAnchorsSize = mNumClasses * 2 * 4 * sizeof(float); + size_t mAnchorBottomHeightSize = mNumClasses * sizeof(float); size_t workspaces[2]; - workspaces[0] = anchors_size; - workspaces[1] = 
anchor_bottom_height_size; + workspaces[0] = mAnchorsSize; + workspaces[1] = mAnchorBottomHeightSize; return calculateTotalWorkspaceSize(workspaces, 2); } -int DecodeBbox3DPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, +int32_t DecodeBbox3DPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { - int batchSize = inputDesc[0].dims.d[0]; - // Inputs - float* cls_input = const_cast((float const*) (inputs[0])); - float* box_input = const_cast((float const*) inputs[1]); - float* dir_cls_input = const_cast((float const*) (inputs[2])); - // Outputs - float* bndbox_output = (float*) (outputs[0]); - int* box_num = (int*) (outputs[1]); - // Initialize workspaces - float* anchors = (float*) workspace; - size_t anchors_size = num_classes_ * 2 * 4 * sizeof(float); - float* anchor_bottom_height = (float*) nextWorkspacePtr((int8_t*) anchors, anchors_size); - size_t anchor_bottom_height_size = num_classes_ * sizeof(float); - checkCudaErrors(cudaMemcpyAsync(anchors, &anchors_[0], anchors_size, cudaMemcpyHostToDevice, stream)); - checkCudaErrors(cudaMemcpyAsync( - anchor_bottom_height, &anchor_bottom_height_[0], anchor_bottom_height_size, cudaMemcpyHostToDevice, stream)); - // Initialize box_num to 0 - checkCudaErrors(cudaMemsetAsync(box_num, 0, batchSize * sizeof(int), stream)); - decodeBbox3DLaunch(batchSize, cls_input, box_input, dir_cls_input, anchors, anchor_bottom_height, bndbox_output, - box_num, min_x_range_, max_x_range_, min_y_range_, max_y_range_, feature_w_, feature_h_, num_classes_ * 2, - num_classes_, 7, score_thresh_, dir_offset_, dir_limit_offset_, num_dir_bins_, stream); - return 0; + try + { + int32_t batchSize = inputDesc[0].dims.d[0]; + + // Inputs + auto const* clsInput = static_cast(inputs[0]); + auto const* boxInput = static_cast(inputs[1]); + auto const* dirClsInput = static_cast(inputs[2]); + + // Outputs + auto* bndboxOutput = static_cast(outputs[0]); + auto* boxNum = static_cast(outputs[1]); + + // Initialize workspaces + auto* anchors = static_cast(workspace); + size_t anchorsSize = mNumClasses * 2 * 4 * sizeof(float); + auto* anchorBottomHeight + = reinterpret_cast(nextWorkspacePtr(reinterpret_cast(anchors), anchorsSize)); + size_t anchorBottomHeightSize = mNumClasses * sizeof(float); + PLUGIN_CUASSERT(cudaMemcpyAsync(anchors, &mAnchors[0], anchorsSize, cudaMemcpyHostToDevice, stream)); + PLUGIN_CUASSERT(cudaMemcpyAsync( + anchorBottomHeight, &mAnchorBottomHeight[0], anchorBottomHeightSize, cudaMemcpyHostToDevice, stream)); + // Initialize boxNum to 0 + PLUGIN_CUASSERT(cudaMemsetAsync(boxNum, 0, batchSize * sizeof(int32_t), stream)); + + decodeBbox3DLaunch(batchSize, clsInput, boxInput, dirClsInput, anchors, anchorBottomHeight, bndboxOutput, + boxNum, mMinXRange, mMaxXRange, mMinYRange, mMaxYRange, mFeatureW, mFeatureH, mNumClasses * 2, mNumClasses, + 7, mScoreThreashold, mDirOffset, mDirLimitOffset, mNumDirBins, stream); + return cudaPeekAtLastError(); + } + catch (std::exception const& e) + { + caughtError(e); + } + return STATUS_FAILURE; } nvinfer1::DataType DecodeBbox3DPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { - if (index == 0) - return inputTypes[0]; - return nvinfer1::DataType::kINT32; + try + { + PLUGIN_VALIDATE(inputTypes != nullptr); + if (index == 0) + { + 
return inputTypes[0]; + } + return nvinfer1::DataType::kINT32; + } + catch (std::exception const& e) + { + caughtError(e); + } + return nvinfer1::DataType{}; } char const* DecodeBbox3DPlugin::getPluginType() const noexcept @@ -255,12 +301,12 @@ char const* DecodeBbox3DPlugin::getPluginVersion() const noexcept return kPLUGIN_VERSION; } -int DecodeBbox3DPlugin::getNbOutputs() const noexcept +int32_t DecodeBbox3DPlugin::getNbOutputs() const noexcept { return 2; } -int DecodeBbox3DPlugin::initialize() noexcept +int32_t DecodeBbox3DPlugin::initialize() noexcept { return 0; } @@ -269,31 +315,38 @@ void DecodeBbox3DPlugin::terminate() noexcept {} size_t DecodeBbox3DPlugin::getSerializationSize() const noexcept { - size_t scalar_size = 9 * sizeof(float) + 4 * sizeof(int); - size_t vector_size = num_classes_ * 9 * sizeof(float); - return scalar_size + vector_size; + size_t scalarSize = 9 * sizeof(float) + 4 * sizeof(int32_t); + size_t vectorSize = mNumClasses * 9 * sizeof(float); + return scalarSize + vectorSize; } void DecodeBbox3DPlugin::serialize(void* buffer) const noexcept { - char* d = reinterpret_cast(buffer); - writeToBuffer(d, min_x_range_); - writeToBuffer(d, max_x_range_); - writeToBuffer(d, min_y_range_); - writeToBuffer(d, max_y_range_); - writeToBuffer(d, min_z_range_); - writeToBuffer(d, max_z_range_); - writeToBuffer(d, num_dir_bins_); - writeToBuffer(d, dir_offset_); - writeToBuffer(d, dir_limit_offset_); - writeToBuffer(d, score_thresh_); - writeToBuffer(d, num_classes_); - writeToBuffer(d, feature_h_); - writeToBuffer(d, feature_w_); - for (int i = 0; i < num_classes_; i++) - writeToBuffer(d, anchor_bottom_height_[i]); - for (int i = 0; i < num_classes_ * 2 * 4; i++) - writeToBuffer(d, anchors_[i]); + PLUGIN_ASSERT(buffer != nullptr); + auto* d = reinterpret_cast(buffer); + auto* const start = d; + writeToBuffer(d, mMinXRange); + writeToBuffer(d, mMaxXRange); + writeToBuffer(d, mMinYRange); + writeToBuffer(d, mMaxYRange); + writeToBuffer(d, mMinZRange); + writeToBuffer(d, mMaxZRange); + writeToBuffer(d, mNumDirBins); + writeToBuffer(d, mDirOffset); + writeToBuffer(d, mDirLimitOffset); + writeToBuffer(d, mScoreThreashold); + writeToBuffer(d, mNumClasses); + writeToBuffer(d, mFeatureH); + writeToBuffer(d, mFeatureW); + for (int32_t i = 0; i < mNumClasses; i++) + { + writeToBuffer(d, mAnchorBottomHeight[i]); + } + for (int32_t i = 0; i < mNumClasses * 2 * 4; i++) + { + writeToBuffer(d, mAnchors[i]); + } + PLUGIN_ASSERT(d == start + getSerializationSize()); } void DecodeBbox3DPlugin::destroy() noexcept @@ -303,7 +356,15 @@ void DecodeBbox3DPlugin::destroy() noexcept void DecodeBbox3DPlugin::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } char const* DecodeBbox3DPlugin::getPluginNamespace() const noexcept @@ -341,74 +402,73 @@ PluginFieldCollection const* DecodeBbox3DPluginCreator::getFieldNames() noexcept return &mFC; } -IPluginV2* DecodeBbox3DPluginCreator::createPlugin(char const* name, PluginFieldCollection const* fc) noexcept +IPluginV2* DecodeBbox3DPluginCreator::createPlugin(char const* /*name*/, PluginFieldCollection const* fc) noexcept { try { + PLUGIN_VALIDATE(fc != nullptr); PluginField const* fields = fc->fields; - int nbFields = fc->nbFields; - float point_cloud_range[6] = {0.0F}; + + // Initialize default values for attributes. 
+ float pointCloudRange[6] = {0.F}; std::vector anchors{}; - std::vector anchor_bottom_height{}; - float dir_offset = 0.78539F; - float dir_limit_offset = 0.0F; - int num_dir_bins = 2; - float score_thresh = 0.1F; - for (int i = 0; i < nbFields; ++i) + std::vector anchorBottomHeight{}; + float dirOffset = 0.78539F; + float dirLimitOffset = 0.F; + int32_t numDirBins = 2; + float scoreThreshold = 0.F; + + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attr_name = fields[i].name; if (!strcmp(attr_name, "point_cloud_range")) { - float const* d = static_cast(fields[i].data); - point_cloud_range[0] = d[0]; - point_cloud_range[1] = d[1]; - point_cloud_range[2] = d[2]; - point_cloud_range[3] = d[3]; - point_cloud_range[4] = d[4]; - point_cloud_range[5] = d[5]; + auto const* d = static_cast(fields[i].data); + for (int32_t pointCloudIdx = 0; pointCloudIdx < 6; pointCloudIdx++) + { + pointCloudRange[pointCloudIdx] = d[pointCloudIdx]; + } } else if (!strcmp(attr_name, "anchors")) { - float const* as = static_cast(fields[i].data); - for (int j = 0; j < fields[i].length; ++j) + auto const* d = static_cast(fields[i].data); + for (int32_t j = 0; j < fields[i].length; ++j) { - anchors.push_back(*as); - ++as; + anchors.push_back(d[j]); } } else if (!strcmp(attr_name, "anchor_bottom_height")) { - float const* ah = static_cast(fields[i].data); - for (int j = 0; j < fields[i].length; ++j) + auto const* d = static_cast(fields[i].data); + for (int32_t j = 0; j < fields[i].length; ++j) { - anchor_bottom_height.push_back(*ah); - ++ah; + anchorBottomHeight.push_back(d[j]); } } else if (!strcmp(attr_name, "dir_offset")) { - float const* d = static_cast(fields[i].data); - dir_offset = d[0]; + auto const* d = static_cast(fields[i].data); + dirOffset = d[0]; } else if (!strcmp(attr_name, "dir_limit_offset")) { - float const* d = static_cast(fields[i].data); - dir_limit_offset = d[0]; + auto const* d = static_cast(fields[i].data); + dirLimitOffset = d[0]; } else if (!strcmp(attr_name, "num_dir_bins")) { - int const* d = static_cast(fields[i].data); - num_dir_bins = d[0]; + auto const* d = static_cast(fields[i].data); + numDirBins = d[0]; } else if (!strcmp(attr_name, "score_thresh")) { - float const* d = static_cast(fields[i].data); - score_thresh = d[0]; + auto const* d = static_cast(fields[i].data); + scoreThreshold = d[0]; } } - IPluginV2* plugin = new DecodeBbox3DPlugin(point_cloud_range[0], point_cloud_range[3], point_cloud_range[1], - point_cloud_range[4], point_cloud_range[2], point_cloud_range[5], num_dir_bins, dir_offset, - dir_limit_offset, anchor_bottom_height, anchors, score_thresh); + IPluginV2* plugin = new DecodeBbox3DPlugin(pointCloudRange[0], pointCloudRange[3], pointCloudRange[1], + pointCloudRange[4], pointCloudRange[2], pointCloudRange[5], numDirBins, dirOffset, dirLimitOffset, + anchorBottomHeight, anchors, scoreThreshold); return plugin; } catch (std::exception const& e) @@ -419,7 +479,7 @@ IPluginV2* DecodeBbox3DPluginCreator::createPlugin(char const* name, PluginField } IPluginV2* DecodeBbox3DPluginCreator::deserializePlugin( - char const* name, void const* serialData, size_t serialLength) noexcept + char const* /*name*/, void const* serialData, size_t serialLength) noexcept { try { @@ -434,7 +494,15 @@ IPluginV2* DecodeBbox3DPluginCreator::deserializePlugin( void DecodeBbox3DPluginCreator::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception 
const& e) + { + caughtError(e); + } } char const* DecodeBbox3DPluginCreator::getPluginNamespace() const noexcept diff --git a/plugin/decodeBbox3DPlugin/decodeBbox3D.h b/plugin/decodeBbox3DPlugin/decodeBbox3D.h index 6b2bebee..ea85785a 100644 --- a/plugin/decodeBbox3DPlugin/decodeBbox3D.h +++ b/plugin/decodeBbox3DPlugin/decodeBbox3D.h @@ -19,8 +19,6 @@ #define _DECODE_BBOX_3D_H_ #include "NvInferPlugin.h" -#include "common/bboxUtils.h" -#include "common/kernels/kernel.h" #include #include #include @@ -34,33 +32,33 @@ class DecodeBbox3DPlugin : public nvinfer1::IPluginV2DynamicExt { public: DecodeBbox3DPlugin() = delete; - DecodeBbox3DPlugin(float x_min, float x_max, float y_min, float y_max, float z_min, float z_max, int num_dir_bins, - float dir_offset, float dir_limit_offset, std::vector const& anchor_bottom_height, - std::vector const& anchors, float score_thresh); - DecodeBbox3DPlugin(float x_min, float x_max, float y_min, float y_max, float z_min, float z_max, int num_dir_bins, - float dir_offset, float dir_limit_offset, std::vector const& anchor_bottom_height, - std::vector const& anchors, float score_thresh, int feature_h, int feature_w); + DecodeBbox3DPlugin(float xMin, float xMax, float yMin, float yMax, float zMin, float zMax, int32_t numDirBins, + float dirOffset, float dirLimitOffset, std::vector const& anchorBottomHeight, + std::vector const& anchors, float scoreThresh); + DecodeBbox3DPlugin(float xMin, float xMax, float yMin, float yMax, float zMin, float zMax, int32_t numDirBins, + float dirOffset, float dirLimitOffset, std::vector const& anchorBottomHeight, + std::vector const& anchors, float scoreThresh, int32_t featureH, int32_t featureW); DecodeBbox3DPlugin(void const* data, size_t length); // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* clone() const noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, + nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept override; - int enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; + int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; // IPluginV2Ext Methods nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* 
inputTypes, int32_t nbInputs) const noexcept override; // IPluginV2 Methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -70,21 +68,21 @@ class DecodeBbox3DPlugin : public nvinfer1::IPluginV2DynamicExt private: std::string mNamespace; - float min_x_range_; - float max_x_range_; - float min_y_range_; - float max_y_range_; - float min_z_range_; - float max_z_range_; - int num_dir_bins_; - float dir_offset_; - float dir_limit_offset_; - int num_classes_; - std::vector anchor_bottom_height_; - std::vector anchors_; - float score_thresh_; - int feature_h_; - int feature_w_; + float mMinXRange; + float mMaxXRange; + float mMinYRange; + float mMaxYRange; + float mMinZRange; + float mMaxZRange; + int32_t mNumDirBins; + float mDirOffset; + float mDirLimitOffset; + int32_t mNumClasses; + std::vector mAnchorBottomHeight; + std::vector mAnchors; + float mScoreThreashold; + int32_t mFeatureH; + int32_t mFeatureW; }; class DecodeBbox3DPluginCreator : public nvinfer1::IPluginCreator diff --git a/plugin/detectionLayerPlugin/detectionLayerPlugin.cpp b/plugin/detectionLayerPlugin/detectionLayerPlugin.cpp index 0a993b7b..840156cd 100644 --- a/plugin/detectionLayerPlugin/detectionLayerPlugin.cpp +++ b/plugin/detectionLayerPlugin/detectionLayerPlugin.cpp @@ -253,14 +253,18 @@ DetectionLayer::DetectionLayer(void const* data, size_t length) mType = DataType::kFLOAT; } -bool DetectionLayer::checkValidInputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) +void DetectionLayer::checkValidInputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) { // classifier_delta_bbox[N, anchors, num_classes*4, 1, 1] // classifier_class[N, anchors, num_classes, 1, 1] // rpn_rois[N, anchors, 4] - return (nbInputDims != 3) && (inputs[0].nbDims == 4 && inputs[0].d[1] == mNbClasses * 4) && // delta_bbox - (inputs[1].nbDims == 4 && inputs[1].d[1] == mNbClasses) && // score - (inputs[2].nbDims == 2 && inputs[2].d[1] == 4); // roi + PLUGIN_VALIDATE(nbInputDims == 3); + // delta_bbox + PLUGIN_VALIDATE(inputs[0].nbDims == 4 && inputs[0].d[1] == mNbClasses * 4); + // score + PLUGIN_VALIDATE(inputs[1].nbDims == 4 && inputs[1].d[1] == mNbClasses); + // roi + PLUGIN_VALIDATE(inputs[2].nbDims == 2 && inputs[2].d[1] == 4); } size_t DetectionLayer::getWorkspaceSize(int32_t batchSize) const noexcept @@ -271,13 +275,18 @@ size_t DetectionLayer::getWorkspaceSize(int32_t batchSize) const noexcept Dims DetectionLayer::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { - if (!(checkValidInputs(inputs, nbInputDims) && (index == 0))) + try { - return Dims{}; + checkValidInputs(inputs, nbInputDims); + PLUGIN_VALIDATE(index == 0); + // [N, anchors, (y1, x1, y2, x2, class_id, score)] + return {2, {mKeepTopK, 6}}; } - - // [N, anchors, (y1, x1, y2, x2, class_id, score)] - return {2, {mKeepTopK, 6}}; + catch (std::exception const& e) + { + caughtError(e); + } + return Dims{}; } int32_t DetectionLayer::enqueue( @@ -334,12 +343,19 @@ void DetectionLayer::configurePlugin(Dims const* inputDims, int32_t nbInputs, Di DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, bool const* outputIsBroadcast, 
PluginFormat floatFormat, int32_t maxBatchSize) noexcept { - PLUGIN_ASSERT(checkValidInputs(inputDims, nbInputs)); - PLUGIN_ASSERT(inputDims[0].d[0] == inputDims[1].d[0] && inputDims[1].d[0] == inputDims[2].d[0]); + try + { + checkValidInputs(inputDims, nbInputs); + PLUGIN_VALIDATE(inputDims[0].d[0] == inputDims[1].d[0] && inputDims[1].d[0] == inputDims[2].d[0]); - mAnchorsCnt = inputDims[2].d[0]; - mType = inputTypes[0]; - mMaxBatchSize = maxBatchSize; + mAnchorsCnt = inputDims[2].d[0]; + mType = inputTypes[0]; + mMaxBatchSize = maxBatchSize; + } + catch (std::exception const& e) + { + caughtError(e); + } } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. diff --git a/plugin/detectionLayerPlugin/detectionLayerPlugin.h b/plugin/detectionLayerPlugin/detectionLayerPlugin.h index 82cd77b4..adbf535d 100644 --- a/plugin/detectionLayerPlugin/detectionLayerPlugin.h +++ b/plugin/detectionLayerPlugin/detectionLayerPlugin.h @@ -88,7 +88,7 @@ class DetectionLayer : public IPluginV2Ext void detachFromContext() noexcept override; private: - bool checkValidInputs(nvinfer1::Dims const* inputs, int32_t nbInputDims); + void checkValidInputs(nvinfer1::Dims const* inputs, int32_t nbInputDims); int32_t mBackgroundLabel; int32_t mNbClasses; diff --git a/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.cpp b/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.cpp index bbb7db79..4a1c1fc8 100644 --- a/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.cpp +++ b/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.cpp @@ -31,20 +31,10 @@ std::vector DisentangledAttentionPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(DisentangledAttentionPluginCreator); -#define CHECK_CUDNN(call) \ - do \ - { \ - cudnnStatus_t status = call; \ - if (status != CUDNN_STATUS_SUCCESS) \ - { \ - return status; \ - } \ - } while (0) - namespace { -constexpr char const* DEBERTA_NAME{"DisentangledAttention_TRT"}; -constexpr char const* DEBERTA_VERSION{"1"}; +constexpr char const* kDEBERTA_PLUGIN_NAME{"DisentangledAttention_TRT"}; +constexpr char const* kDEBERTA_PLUGIN_VERSION{"1"}; } // namespace DisentangledAttentionPlugin::DisentangledAttentionPlugin() {} @@ -62,11 +52,6 @@ DisentangledAttentionPlugin::DisentangledAttentionPlugin(void const* serialData, deserialize_value(&serialData, &serialLength, &mFactor); } -DisentangledAttentionPlugin::~DisentangledAttentionPlugin() -{ - terminate(); -} - int32_t DisentangledAttentionPlugin::getNbOutputs() const noexcept { return 1; @@ -74,62 +59,41 @@ int32_t DisentangledAttentionPlugin::getNbOutputs() const noexcept int32_t DisentangledAttentionPlugin::initialize() noexcept { - // if need large amount of GPU memory, recommend to specify in getWorkspaceSize so TRT allocates it. 
If not, when a - // plugin is called many times, the memory manually allocated by this initialize() is repeated many times -- may - // overflow return 0; } char const* DisentangledAttentionPlugin::getPluginType() const noexcept { - return DEBERTA_NAME; + return kDEBERTA_PLUGIN_NAME; } char const* DisentangledAttentionPlugin::getPluginVersion() const noexcept { - return DEBERTA_VERSION; + return kDEBERTA_PLUGIN_VERSION; } // IPluginV2DynamicExt Methods nvinfer1::DimsExprs DisentangledAttentionPlugin::getOutputDimensions( int32_t index, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { - nvinfer1::DimsExprs output; - - PLUGIN_ASSERT(nbInputs == 3); // 3 inputs - output = inputs[0]; // same as input[0], i.e. data0 - - PLUGIN_ASSERT(index < 1); // only one output - - return output; -} - -void DisentangledAttentionPlugin::attachToContext( - cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept -{ -} - -// Detach the plugin object from its execution context. -void DisentangledAttentionPlugin::detachFromContext() noexcept {} - -template -TDataType const* DisentangledAttentionPlugin::pointer_const_cast(void const* const p) -{ - return static_cast(p); + try + { + PLUGIN_VALIDATE(inputs != nullptr); + PLUGIN_VALIDATE(index == 0); // Only one output + return inputs[0]; + } + catch (std::exception const& e) + { + caughtError(e); + } + return nvinfer1::DimsExprs{}; } template -TDataType* DisentangledAttentionPlugin::pointer_cast(void* const p) -{ - return static_cast(p); -} - -int32_t DisentangledAttentionPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, - nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, - cudaStream_t stream) noexcept +void DisentangledAttentionPlugin::enqueueType(nvinfer1::PluginTensorDesc const* inputDesc, + nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, cudaStream_t stream, + TDataType factor) { - -#if kDISENTANGLED_VERSION == 1 nvinfer1::Dims dims0 = inputDesc[0].dims; nvinfer1::Dims dims1 = inputDesc[1].dims; nvinfer1::Dims dims2 = inputDesc[2].dims; @@ -138,84 +102,45 @@ int32_t DisentangledAttentionPlugin::enqueue(nvinfer1::PluginTensorDesc const* i dim3 dimData2(dims2.d[0], dims2.d[1], dims2.d[2]); dim3 dimResult(dimData0); - dim3 block_optimized(kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1); - dim3 grid_optimized((dimResult.z - 1) / kDISENTANGLED_TILESIZE_V1 + 1, - (dimResult.y - 1) / kDISENTANGLED_TILESIZE_V1 + 1, dimResult.x); + dim3 blockOptimized(kDISENTANGLED_TILESIZE, kDISENTANGLED_BLOCKDIMY); + dim3 gridOptimized( + (dimResult.z - 1) / kDISENTANGLED_TILESIZE + 1, (dimResult.y - 1) / kDISENTANGLED_TILESIZE + 1, dimResult.x); - if (inputDesc[0].type == nvinfer1::DataType::kFLOAT) - { - auto const* data0 = pointer_const_cast(inputs[0]); - auto const* data1 = pointer_const_cast(inputs[1]); - auto const* data2 = pointer_const_cast(inputs[2]); - auto* result = pointer_cast(outputs[0]); - disentangled_kernel_wrapper(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, mFactor, mSpan, block_optimized, grid_optimized, stream); - } - else if (inputDesc[0].type == nvinfer1::DataType::kHALF) - { - auto const* data0 = pointer_const_cast<__half>(inputs[0]); - auto const* data1 = pointer_const_cast<__half>(inputs[1]); - auto const* data2 = pointer_const_cast<__half>(inputs[2]); - auto* result = pointer_cast<__half>(outputs[0]); - __half factor = 
__float2half(mFactor); - disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, factor, mSpan, block_optimized, grid_optimized, stream); - } - else if (inputDesc[0].type == nvinfer1::DataType::kINT8) - { - auto const* data0 = pointer_const_cast(inputs[0]); - auto const* data1 = pointer_const_cast(inputs[1]); - auto const* data2 = pointer_const_cast(inputs[2]); - auto* result = pointer_cast(outputs[0]); - int8_t factor = int8_t(mFactor); - disentangled_kernel_wrapper(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, factor, mSpan, block_optimized, grid_optimized, stream); - } -#elif kDISENTANGLED_VERSION == 2 - nvinfer1::Dims dims0 = inputDesc[0].dims; - nvinfer1::Dims dims1 = inputDesc[1].dims; - nvinfer1::Dims dims2 = inputDesc[2].dims; - dim3 dimData0(dims0.d[0], dims0.d[1], dims0.d[2]); - dim3 dimData1(dims1.d[0], dims1.d[1], dims1.d[2]); - dim3 dimData2(dims2.d[0], dims2.d[1], dims2.d[2]); - dim3 dimResult(dimData0); - - dim3 block_optimized(kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2); - dim3 grid_optimized((dimResult.z - 1) / kDISENTANGLED_TILESIZE_V2 + 1, - (dimResult.y - 1) / kDISENTANGLED_TILESIZE_V2 + 1, dimResult.x); + auto const* data0 = static_cast(inputs[0]); + auto const* data1 = static_cast(inputs[1]); + auto const* data2 = static_cast(inputs[2]); + auto* result = static_cast(outputs[0]); + disentangled_kernel_wrapper(data0, data1, data2, result, + dimData0, dimData1, dimData2, dimResult, factor, mSpan, blockOptimized, gridOptimized, stream); +} - if (inputDesc[0].type == nvinfer1::DataType::kFLOAT) - { - auto const* data0 = pointer_const_cast(inputs[0]); - auto const* data1 = pointer_const_cast(inputs[1]); - auto const* data2 = pointer_const_cast(inputs[2]); - auto* result = pointer_cast(outputs[0]); - disentangled_kernel_wrapper(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, mFactor, mSpan, block_optimized, grid_optimized, stream); - } - else if (inputDesc[0].type == nvinfer1::DataType::kHALF) +int32_t DisentangledAttentionPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, + nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, + void* /* workspace */, cudaStream_t stream) noexcept +{ + try { - auto const* data0 = pointer_const_cast<__half>(inputs[0]); - auto const* data1 = pointer_const_cast<__half>(inputs[1]); - auto const* data2 = pointer_const_cast<__half>(inputs[2]); - auto* result = pointer_cast<__half>(outputs[0]); - __half factor = __float2half(mFactor); - disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, factor, mSpan, block_optimized, grid_optimized, stream); + PLUGIN_VALIDATE(inputDesc && outputDesc && inputs && outputs); + switch (inputDesc[0].type) + { + case nvinfer1::DataType::kFLOAT: + enqueueType(inputDesc, outputDesc, inputs, outputs, stream, mFactor); + break; + case nvinfer1::DataType::kHALF: + enqueueType<__half>(inputDesc, outputDesc, inputs, outputs, stream, __float2half(mFactor)); + break; + case nvinfer1::DataType::kINT8: + enqueueType(inputDesc, outputDesc, inputs, outputs, stream, static_cast(mFactor)); + break; + default: PLUGIN_VALIDATE(false, "Unsupported Datatype"); break; + } + return cudaPeekAtLastError(); } - else if (inputDesc[0].type == nvinfer1::DataType::kINT8) + catch (std::exception const& e) { - auto const* 
data0 = pointer_const_cast(inputs[0]); - auto const* data1 = pointer_const_cast(inputs[1]); - auto const* data2 = pointer_const_cast(inputs[2]); - auto* result = pointer_cast(outputs[0]); - int8_t factor = int8_t(mFactor); - disentangled_kernel_wrapper(data0, data1, data2, - result, dimData0, dimData1, dimData2, dimResult, factor, mSpan, block_optimized, grid_optimized, stream); + caughtError(e); + return STATUS_FAILURE; } -#endif - - return cudaPeekAtLastError(); } size_t DisentangledAttentionPlugin::getSerializationSize() const noexcept @@ -256,7 +181,7 @@ IPluginV2DynamicExt* DisentangledAttentionPlugin::clone() const noexcept try { auto* plugin = new DisentangledAttentionPlugin(mSpan, mFactor); - plugin->setPluginNamespace(mPluginNamespace); + plugin->setPluginNamespace(mNamespace.c_str()); return plugin; } catch (std::exception const& e) @@ -269,41 +194,57 @@ IPluginV2DynamicExt* DisentangledAttentionPlugin::clone() const noexcept void DisentangledAttentionPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { - - // inputs - PLUGIN_ASSERT(nbInputs == 3); // 3 inputs - - // check for valid input dimensions - PLUGIN_ASSERT(in[0].desc.dims.nbDims == 3); - PLUGIN_ASSERT(in[1].desc.dims.nbDims == 3); - PLUGIN_ASSERT(in[2].desc.dims.nbDims == 3); - - // check BN (batch_size * num_heads) dimension consistency - PLUGIN_ASSERT(in[0].desc.dims.d[0] == in[1].desc.dims.d[0]); - PLUGIN_ASSERT(in[0].desc.dims.d[0] == in[2].desc.dims.d[0]); - - // check S (sequence_length) dimension consistency - PLUGIN_ASSERT(in[0].desc.dims.d[1] == in[1].desc.dims.d[1]); - PLUGIN_ASSERT(in[0].desc.dims.d[1] == in[2].desc.dims.d[1]); - PLUGIN_ASSERT(in[0].desc.dims.d[1] == in[0].desc.dims.d[2]); - - // check K (2 * span) dimension consistency for in[1] and in[2] - PLUGIN_ASSERT(in[1].desc.dims.d[2] == 2 * mSpan); - PLUGIN_ASSERT(in[2].desc.dims.d[2] == 2 * mSpan); - - // Outputs (same dimension as in[0]) - PLUGIN_ASSERT(nbOutputs == 1); - PLUGIN_ASSERT(out[0].desc.dims.nbDims == 3); - PLUGIN_ASSERT(in[0].desc.dims.d[0] == out[0].desc.dims.d[0]); - PLUGIN_ASSERT(in[0].desc.dims.d[1] == out[0].desc.dims.d[1]); - PLUGIN_ASSERT(in[0].desc.dims.d[2] == out[0].desc.dims.d[2]); + try + { + // inputs + PLUGIN_VALIDATE(nbInputs == 3); // 3 inputs + + // check for valid input dimensions + PLUGIN_VALIDATE(in[0].desc.dims.nbDims == 3); + PLUGIN_VALIDATE(in[1].desc.dims.nbDims == 3); + PLUGIN_VALIDATE(in[2].desc.dims.nbDims == 3); + + // check BN (batch_size * num_heads) dimension consistency + PLUGIN_VALIDATE(in[0].desc.dims.d[0] == in[1].desc.dims.d[0]); + PLUGIN_VALIDATE(in[0].desc.dims.d[0] == in[2].desc.dims.d[0]); + + // check S (sequence_length) dimension consistency + PLUGIN_VALIDATE(in[0].desc.dims.d[1] == in[1].desc.dims.d[1]); + PLUGIN_VALIDATE(in[0].desc.dims.d[1] == in[2].desc.dims.d[1]); + PLUGIN_VALIDATE(in[0].desc.dims.d[1] == in[0].desc.dims.d[2]); + + // check K (2 * span) dimension consistency for in[1] and in[2] + PLUGIN_VALIDATE(in[1].desc.dims.d[2] == 2 * mSpan); + PLUGIN_VALIDATE(in[2].desc.dims.d[2] == 2 * mSpan); + + // Outputs (same dimension as in[0]) + PLUGIN_VALIDATE(nbOutputs == 1); + PLUGIN_VALIDATE(out[0].desc.dims.nbDims == 3); + PLUGIN_VALIDATE(in[0].desc.dims.d[0] == out[0].desc.dims.d[0]); + PLUGIN_VALIDATE(in[0].desc.dims.d[1] == out[0].desc.dims.d[1]); + PLUGIN_VALIDATE(in[0].desc.dims.d[2] == out[0].desc.dims.d[2]); + } + catch (std::exception const& e) + { + caughtError(e); + } } 
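[Editorial aside, not part of the patch] The hunks above (here and in decodeBbox3D) follow one convention: checks inside noexcept plugin entry points move from PLUGIN_ASSERT to PLUGIN_VALIDATE wrapped in try/catch, so a failed check is logged via caughtError and a benign value (STATUS_FAILURE, an empty Dims or DataType, and so on) is returned instead of terminating the process. A condensed sketch of that convention, with exampleEnqueue as a hypothetical stand-in for a plugin's enqueue() override and the include paths assumed from this patch:

// Hedged sketch of the validate-and-catch pattern used throughout this change.
#include "common/checkMacrosPlugin.h" // assumed source of PLUGIN_VALIDATE / caughtError
#include "common/plugin.h"            // assumed source of STATUS_FAILURE
#include "NvInfer.h"
#include <cuda_runtime_api.h>
#include <exception>

int32_t exampleEnqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc,
    void const* const* inputs, void* const* outputs, cudaStream_t stream) noexcept
{
    try
    {
        PLUGIN_VALIDATE(inputDesc != nullptr && outputDesc != nullptr);
        PLUGIN_VALIDATE(inputs != nullptr && outputs != nullptr);
        // ... launch the real kernels on `stream` here ...
        return cudaPeekAtLastError(); // surface asynchronous launch failures, as the plugins above do
    }
    catch (std::exception const& e)
    {
        caughtError(e); // log, but never let an exception escape a noexcept API
    }
    return STATUS_FAILURE; // graceful failure code rather than an abort
}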
nvinfer1::DataType DisentangledAttentionPlugin::getOutputDataType( int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { - PLUGIN_ASSERT(inputTypes && nbInputs > 0 && index < 1); - return inputTypes[0]; // version 1, same as data1; version 2, same as data0 + try + { + PLUGIN_VALIDATE(inputTypes != nullptr); + PLUGIN_VALIDATE(nbInputs > 0); + PLUGIN_VALIDATE(index == 0); + return inputTypes[0]; // version 1, same as data1; version 2, same as data0 + } + catch (std::exception const& e) + { + caughtError(e); + } + return nvinfer1::DataType{}; } size_t DisentangledAttentionPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, @@ -314,12 +255,20 @@ size_t DisentangledAttentionPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc void DisentangledAttentionPlugin::setPluginNamespace(char const* libNamespace) noexcept { - mPluginNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } char const* DisentangledAttentionPlugin::getPluginNamespace() const noexcept { - return mPluginNamespace; + return mNamespace.c_str(); } DisentangledAttentionPluginCreator::DisentangledAttentionPluginCreator() @@ -336,12 +285,12 @@ DisentangledAttentionPluginCreator::DisentangledAttentionPluginCreator() char const* DisentangledAttentionPluginCreator::getPluginName() const noexcept { - return DEBERTA_NAME; + return kDEBERTA_PLUGIN_NAME; } char const* DisentangledAttentionPluginCreator::getPluginVersion() const noexcept { - return DEBERTA_VERSION; + return kDEBERTA_PLUGIN_VERSION; } PluginFieldCollection const* DisentangledAttentionPluginCreator::getFieldNames() noexcept @@ -356,32 +305,42 @@ char const* DisentangledAttentionPluginCreator::getPluginNamespace() const noexc void DisentangledAttentionPluginCreator::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } IPluginV2DynamicExt* DisentangledAttentionPluginCreator::createPlugin( - char const* name, PluginFieldCollection const* fc) noexcept + char const* /*name*/, PluginFieldCollection const* fc) noexcept { try { + PLUGIN_VALIDATE(fc != nullptr); + // Set default invalid values (for assert in case when attributes are missing) int32_t span = 0; - float factor = 0.0F; + float factor = 0.F; for (int32_t i = 0; i < fc->nbFields; i++) { - std::string field_name(fc->fields[i].name); - if (field_name.compare("span") == 0) + std::string fieldName = fc->fields[i].name; + if (fieldName.compare("span") == 0) { span = *static_cast(fc->fields[i].data); } - if (field_name.compare("factor") == 0) + if (fieldName.compare("factor") == 0) { factor = *static_cast(fc->fields[i].data); } } - PLUGIN_ASSERT(span >= 0); - PLUGIN_ASSERT(factor > 0.0F && factor < 1.0F); // factor is 1/sqrt(3d), therefore must less than 1 + PLUGIN_VALIDATE(span >= 0); + PLUGIN_VALIDATE(factor > 0.F && factor < 1.F); // factor is 1/sqrt(3d), therefore must less than 1 DisentangledAttentionPlugin* plugin = new DisentangledAttentionPlugin(span, factor); plugin->setPluginNamespace(mNamespace.c_str()); @@ -396,7 +355,7 @@ IPluginV2DynamicExt* DisentangledAttentionPluginCreator::createPlugin( } IPluginV2DynamicExt* DisentangledAttentionPluginCreator::deserializePlugin( - char const* name, void const* serialData, size_t serialLength) noexcept + char const* 
/*name*/, void const* serialData, size_t serialLength) noexcept { try { diff --git a/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.h b/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.h index 789943b2..72223755 100644 --- a/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.h +++ b/plugin/disentangledAttentionPlugin/disentangledAttentionPlugin.h @@ -36,13 +36,16 @@ namespace plugin // using namespace nvinfer1; -#define kDISENTANGLED_VERSION 2 // Version 1: regular relative position index // Version 2: log bucket relative position index -constexpr int32_t kDISENTANGLED_TILESIZE_V1 = 32; -constexpr int32_t kDISENTANGLED_BLOCKDIMY_V1 = 8; -constexpr int32_t kDISENTANGLED_TILESIZE_V2 = 64; -constexpr int32_t kDISENTANGLED_BLOCKDIMY_V2 = 4; +#define kDISENTANGLED_VERSION 2 +#if kDISENTANGLED_VERSION == 1 +constexpr int32_t kDISENTANGLED_TILESIZE = 32; +constexpr int32_t kDISENTANGLED_BLOCKDIMY = 8; +#elif kDISENTANGLED_VERSION == 2 +constexpr int32_t kDISENTANGLED_TILESIZE = 64; +constexpr int32_t kDISENTANGLED_BLOCKDIMY = 4; +#endif template void disentangled_kernel_wrapper(TDataType const* data0, TDataType const* data1, TDataType const* data2, @@ -58,14 +61,6 @@ class DisentangledAttentionPlugin final : public nvinfer1::IPluginV2DynamicExt DisentangledAttentionPlugin(void const* serialData, size_t serialLength); - ~DisentangledAttentionPlugin() override; - - template - TDataType const* pointer_const_cast(void const* const p); - - template - TDataType* pointer_cast(void* p); - int32_t getNbOutputs() const noexcept override; // DynamicExt plugins returns DimsExprs class instead of Dims @@ -79,9 +74,9 @@ class DisentangledAttentionPlugin final : public nvinfer1::IPluginV2DynamicExt size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; + // This is where the plugin work is done. 
int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, - void const* const* inputs, void* const* outputs, void* workspace, - cudaStream_t stream) noexcept override; // this is where the plugin work is done + void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -101,11 +96,6 @@ class DisentangledAttentionPlugin final : public nvinfer1::IPluginV2DynamicExt nvinfer1::DataType getOutputDataType( int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; - void attachToContext( - cudnnContext* cudnn, cublasContext* cublas, nvinfer1::IGpuAllocator* allocator) noexcept override; - - void detachFromContext() noexcept override; - void setPluginNamespace(char const* pluginNamespace) noexcept override; char const* getPluginNamespace() const noexcept override; @@ -114,7 +104,11 @@ class DisentangledAttentionPlugin final : public nvinfer1::IPluginV2DynamicExt nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; private: - char const* mPluginNamespace; + // Helper method for enqueue() + template + void enqueueType(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + void const* const* inputs, void* const* outputs, cudaStream_t stream, TDataType factor); + std::string mNamespace; // attributes diff --git a/plugin/disentangledAttentionPlugin/disentangledKernel.cu b/plugin/disentangledAttentionPlugin/disentangledKernel.cu index c7024ad6..fb26db33 100644 --- a/plugin/disentangledAttentionPlugin/disentangledKernel.cu +++ b/plugin/disentangledAttentionPlugin/disentangledKernel.cu @@ -263,25 +263,16 @@ void disentangled_kernel_wrapper(TDataType const* data0, TDataType const* data1, data0, data1, data2, result, dimData0, dimData1, dimData2, dimResult, factor, span); } -template void disentangled_kernel_wrapper( +template void disentangled_kernel_wrapper( float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, cudaStream_t); -template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V1, kDISENTANGLED_BLOCKDIMY_V1>(__half const*, +template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE, kDISENTANGLED_BLOCKDIMY>(__half const*, __half const*, __half const*, __half*, dim3, dim3, dim3, dim3, __half, int32_t, dim3, dim3, cudaStream_t); -template void disentangled_kernel_wrapper(int8_t const*, - int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, cudaStream_t); - -template void disentangled_kernel_wrapper( - float const*, float const*, float const*, float*, dim3, dim3, dim3, dim3, float, int32_t, dim3, dim3, cudaStream_t); - -template void disentangled_kernel_wrapper<__half, kDISENTANGLED_TILESIZE_V2, kDISENTANGLED_BLOCKDIMY_V2>(__half const*, - __half const*, __half const*, __half*, dim3, dim3, dim3, dim3, __half, int32_t, dim3, dim3, cudaStream_t); - -template void disentangled_kernel_wrapper(int8_t const*, +template void disentangled_kernel_wrapper(int8_t const*, int8_t const*, int8_t const*, int8_t*, dim3, dim3, dim3, dim3, int8_t, int32_t, dim3, dim3, cudaStream_t); #undef IND -} /* plugin */ +} // namespace plugin } // namespace nvinfer1 diff --git a/plugin/efficientNMSPlugin/efficientNMSInference.h b/plugin/efficientNMSPlugin/efficientNMSInference.h index 3ba002ef..d9ec3192 100644 --- 
a/plugin/efficientNMSPlugin/efficientNMSInference.h +++ b/plugin/efficientNMSPlugin/efficientNMSInference.h @@ -22,7 +22,8 @@ #include "efficientNMSParameters.h" -size_t EfficientNMSWorkspaceSize(int batchSize, int numScoreElements, int numClasses, nvinfer1::DataType datatype); +size_t EfficientNMSWorkspaceSize( + int32_t batchSize, int32_t numScoreElements, int32_t numClasses, nvinfer1::DataType datatype); pluginStatus_t EfficientNMSInference(nvinfer1::plugin::EfficientNMSParameters param, void const* boxesInput, void const* scoresInput, void const* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, diff --git a/plugin/efficientNMSPlugin/efficientNMSParameters.h b/plugin/efficientNMSPlugin/efficientNMSParameters.h index 30f2528a..89829089 100644 --- a/plugin/efficientNMSPlugin/efficientNMSParameters.h +++ b/plugin/efficientNMSPlugin/efficientNMSParameters.h @@ -30,27 +30,27 @@ struct EfficientNMSParameters // Related to NMS Options float iouThreshold = 0.5F; float scoreThreshold = 0.5F; - int numOutputBoxes = 100; - int numOutputBoxesPerClass = -1; + int32_t numOutputBoxes = 100; + int32_t numOutputBoxesPerClass = -1; bool padOutputBoxesPerClass = false; - int backgroundClass = -1; + int32_t backgroundClass = -1; bool scoreSigmoid = false; bool clipBoxes = false; - int boxCoding = 0; + int32_t boxCoding = 0; bool classAgnostic = false; // Related to NMS Internals - int numSelectedBoxes = 4096; - int scoreBits = -1; + int32_t numSelectedBoxes = 4096; + int32_t scoreBits = -1; bool outputONNXIndices = false; // Related to Tensor Configuration // (These are set by the various plugin configuration methods, no need to define them during plugin creation.) - int batchSize = -1; - int numClasses = 1; - int numBoxElements = -1; - int numScoreElements = -1; - int numAnchors = -1; + int32_t batchSize = -1; + int32_t numClasses = 1; + int32_t numBoxElements = -1; + int32_t numScoreElements = -1; + int32_t numAnchors = -1; bool shareLocation = true; bool shareAnchors = true; bool boxDecoder = false; diff --git a/plugin/efficientNMSPlugin/efficientNMSPlugin.cpp b/plugin/efficientNMSPlugin/efficientNMSPlugin.cpp index b6e9f65b..2f5d428b 100644 --- a/plugin/efficientNMSPlugin/efficientNMSPlugin.cpp +++ b/plugin/efficientNMSPlugin/efficientNMSPlugin.cpp @@ -59,7 +59,7 @@ char const* EfficientNMSPlugin::getPluginVersion() const noexcept return kEFFICIENT_NMS_PLUGIN_VERSION; } -int EfficientNMSPlugin::getNbOutputs() const noexcept +int32_t EfficientNMSPlugin::getNbOutputs() const noexcept { if (mParam.outputONNXIndices) { @@ -71,7 +71,7 @@ int EfficientNMSPlugin::getNbOutputs() const noexcept return 4; } -int EfficientNMSPlugin::initialize() noexcept +int32_t EfficientNMSPlugin::initialize() noexcept { if (!initialized) { @@ -131,7 +131,7 @@ char const* EfficientNMSPlugin::getPluginNamespace() const noexcept } nvinfer1::DataType EfficientNMSPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { if (mParam.outputONNXIndices) { @@ -164,7 +164,7 @@ IPluginV2DynamicExt* EfficientNMSPlugin::clone() const noexcept } DimsExprs EfficientNMSPlugin::getOutputDimensions( - int outputIndex, DimsExprs const* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept + int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) noexcept { try { @@ -234,7 +234,7 @@ DimsExprs EfficientNMSPlugin::getOutputDimensions( } bool 
EfficientNMSPlugin::supportsFormatCombination( - int pos, PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { if (inOut[pos].format != PluginFormat::kLINEAR) { @@ -246,7 +246,7 @@ bool EfficientNMSPlugin::supportsFormatCombination( PLUGIN_ASSERT(nbInputs == 2); PLUGIN_ASSERT(nbOutputs == 1); - // detection_indices output: int + // detection_indices output: int32_t if (pos == 2) { return inOut[pos].type == DataType::kINT32; @@ -268,8 +268,8 @@ bool EfficientNMSPlugin::supportsFormatCombination( PLUGIN_ASSERT(0 <= pos && pos <= 6); } - // num_detections and detection_classes output: int - int const posOut = pos - nbInputs; + // num_detections and detection_classes output: int32_t + int32_t const posOut = pos - nbInputs; if (posOut == 0 || posOut == 3) { return inOut[pos].type == DataType::kINT32 && inOut[pos].format == PluginFormat::kLINEAR; @@ -281,7 +281,7 @@ bool EfficientNMSPlugin::supportsFormatCombination( } void EfficientNMSPlugin::configurePlugin( - DynamicPluginTensorDesc const* in, int nbInputs, DynamicPluginTensorDesc const* out, int nbOutputs) noexcept + DynamicPluginTensorDesc const* in, int32_t nbInputs, DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { try { @@ -308,7 +308,7 @@ void EfficientNMSPlugin::configurePlugin( mParam.numScoreElements = in[1].desc.dims.d[1] * in[1].desc.dims.d[2]; mParam.numClasses = in[1].desc.dims.d[2]; - // When pad per class is set, the total ouput boxes size may need to be reduced. + // When pad per class is set, the total output boxes size may need to be reduced. // This operation is also done in getOutputDimension(), but for dynamic shapes, the // numOutputBoxes param can't be set until the number of classes is fully known here. 
if (mParam.padOutputBoxesPerClass && mParam.numOutputBoxesPerClass > 0) @@ -359,15 +359,15 @@ void EfficientNMSPlugin::configurePlugin( } size_t EfficientNMSPlugin::getWorkspaceSize( - PluginTensorDesc const* inputs, int nbInputs, PluginTensorDesc const* outputs, int nbOutputs) const noexcept + PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { - int batchSize = inputs[1].dims.d[0]; - int numScoreElements = inputs[1].dims.d[1] * inputs[1].dims.d[2]; - int numClasses = inputs[1].dims.d[2]; + int32_t batchSize = inputs[1].dims.d[0]; + int32_t numScoreElements = inputs[1].dims.d[1] * inputs[1].dims.d[2]; + int32_t numClasses = inputs[1].dims.d[2]; return EfficientNMSWorkspaceSize(batchSize, numScoreElements, numClasses, mParam.datatype); } -int EfficientNMSPlugin::enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, +int32_t EfficientNMSPlugin::enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { try @@ -562,7 +562,7 @@ IPluginV2DynamicExt* EfficientNMSONNXPluginCreator::createPlugin( try { PluginField const* fields = fc->fields; - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "score_threshold")) @@ -578,12 +578,12 @@ IPluginV2DynamicExt* EfficientNMSONNXPluginCreator::createPlugin( if (!strcmp(attrName, "max_output_boxes_per_class")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); + mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "center_point_box")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - mParam.boxCoding = *(static_cast(fields[i].data)); + mParam.boxCoding = *(static_cast(fields[i].data)); } } diff --git a/plugin/efficientNMSPlugin/efficientNMSPlugin.h b/plugin/efficientNMSPlugin/efficientNMSPlugin.h index c40c9635..afceec01 100644 --- a/plugin/efficientNMSPlugin/efficientNMSPlugin.h +++ b/plugin/efficientNMSPlugin/efficientNMSPlugin.h @@ -20,7 +20,7 @@ #include #include "common/plugin.h" -#include "efficientNMSParameters.h" +#include "efficientNMSPlugin/efficientNMSParameters.h" namespace nvinfer1 { @@ -37,8 +37,8 @@ class EfficientNMSPlugin : public IPluginV2DynamicExt // IPluginV2 methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -48,19 +48,19 @@ class EfficientNMSPlugin : public IPluginV2DynamicExt // IPluginV2Ext methods nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputType, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* inputType, int32_t nbInputs) const noexcept override; // IPluginV2DynamicExt methods IPluginV2DynamicExt* clone() const noexcept override; DimsExprs getOutputDimensions( - int outputIndex, DimsExprs const* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept override; + int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) 
noexcept override; bool supportsFormatCombination( - int pos, PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(DynamicPluginTensorDesc const* in, int nbInputs, DynamicPluginTensorDesc const* out, - int nbOutputs) noexcept override; - size_t getWorkspaceSize(PluginTensorDesc const* inputs, int nbInputs, PluginTensorDesc const* outputs, - int nbOutputs) const noexcept override; - int enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, + int32_t pos, PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(DynamicPluginTensorDesc const* in, int32_t nbInputs, DynamicPluginTensorDesc const* out, + int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, + int32_t nbOutputs) const noexcept override; + int32_t enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; protected: diff --git a/plugin/efficientNMSPlugin/tftrt/efficientNMSExplicitTFTRTPlugin.cpp b/plugin/efficientNMSPlugin/tftrt/efficientNMSExplicitTFTRTPlugin.cpp index 9183e5a2..f5c86365 100644 --- a/plugin/efficientNMSPlugin/tftrt/efficientNMSExplicitTFTRTPlugin.cpp +++ b/plugin/efficientNMSPlugin/tftrt/efficientNMSExplicitTFTRTPlugin.cpp @@ -102,18 +102,18 @@ IPluginV2DynamicExt* EfficientNMSExplicitTFTRTPluginCreator::createPlugin( try { const PluginField* fields = fc->fields; - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "max_output_size_per_class")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); + mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "max_total_size")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.numOutputBoxes = *(static_cast(fields[i].data)); + mParam.numOutputBoxes = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "iou_threshold")) { @@ -128,12 +128,12 @@ IPluginV2DynamicExt* EfficientNMSExplicitTFTRTPluginCreator::createPlugin( if (!strcmp(attrName, "pad_per_class")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.padOutputBoxesPerClass = *(static_cast(fields[i].data)); + mParam.padOutputBoxesPerClass = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "clip_boxes")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.clipBoxes = *(static_cast(fields[i].data)); + mParam.clipBoxes = *(static_cast(fields[i].data)); } } diff --git a/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.cpp b/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.cpp index 3c89b044..25c8e0ef 100644 --- a/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.cpp +++ b/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.cpp @@ -60,12 +60,12 @@ const char* EfficientNMSImplicitTFTRTPlugin::getPluginVersion() const noexcept return EFFICIENT_NMS_IMPLICIT_TFTRT_PLUGIN_VERSION; } -int EfficientNMSImplicitTFTRTPlugin::getNbOutputs() const noexcept +int32_t EfficientNMSImplicitTFTRTPlugin::getNbOutputs() const noexcept { return 4; } -int EfficientNMSImplicitTFTRTPlugin::initialize() noexcept +int32_t 
EfficientNMSImplicitTFTRTPlugin::initialize() noexcept { return STATUS_SUCCESS; } @@ -106,7 +106,8 @@ const char* EfficientNMSImplicitTFTRTPlugin::getPluginNamespace() const noexcept return mNamespace.c_str(); } -Dims EfficientNMSImplicitTFTRTPlugin::getOutputDimensions(int outputIndex, const Dims* inputs, int nbInputs) noexcept +Dims EfficientNMSImplicitTFTRTPlugin::getOutputDimensions( + int32_t outputIndex, const Dims* inputs, int32_t nbInputs) noexcept { try { @@ -117,7 +118,7 @@ Dims EfficientNMSImplicitTFTRTPlugin::getOutputDimensions(int outputIndex, const PLUGIN_ASSERT(inputs[1].nbDims == 2); if (mParam.padOutputBoxesPerClass && mParam.numOutputBoxesPerClass > 0) { - const int numClasses = inputs[1].d[1]; + const int32_t numClasses = inputs[1].d[1]; if (mParam.numOutputBoxesPerClass * numClasses < mParam.numOutputBoxes) { mParam.numOutputBoxes = mParam.numOutputBoxesPerClass * numClasses; @@ -157,12 +158,12 @@ Dims EfficientNMSImplicitTFTRTPlugin::getOutputDimensions(int outputIndex, const return Dims{}; } -size_t EfficientNMSImplicitTFTRTPlugin::getWorkspaceSize(int maxBatchSize) const noexcept +size_t EfficientNMSImplicitTFTRTPlugin::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return EfficientNMSWorkspaceSize(maxBatchSize, mParam.numScoreElements, mParam.numClasses, mParam.datatype); } -int EfficientNMSImplicitTFTRTPlugin::enqueue(int batchSize, void const* const* inputs, +int32_t EfficientNMSImplicitTFTRTPlugin::enqueue(int32_t batchSize, void const* const* inputs, EfficientNMSImplicitTFTRTOutputsDataType outputs, void* workspace, cudaStream_t stream) noexcept { try @@ -188,13 +189,13 @@ int EfficientNMSImplicitTFTRTPlugin::enqueue(int batchSize, void const* const* i return -1; } -bool EfficientNMSImplicitTFTRTPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool EfficientNMSImplicitTFTRTPlugin::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } DataType EfficientNMSImplicitTFTRTPlugin::getOutputDataType( - int index, const DataType* inputTypes, int nbInputs) const noexcept + int32_t index, const DataType* inputTypes, int32_t nbInputs) const noexcept { // num_detections and detection_classes use integer outputs if (index == 0 || index == 3) @@ -221,13 +222,13 @@ IPluginV2IOExt* EfficientNMSImplicitTFTRTPlugin::clone() const noexcept } bool EfficientNMSImplicitTFTRTPlugin::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } bool EfficientNMSImplicitTFTRTPlugin::supportsFormatCombination( - int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const noexcept + int32_t pos, const PluginTensorDesc* inOut, int32_t nbInputs, int32_t nbOutputs) const noexcept { if (inOut[pos].format != PluginFormat::kLINEAR) { @@ -241,8 +242,8 @@ bool EfficientNMSImplicitTFTRTPlugin::supportsFormatCombination( PLUGIN_ASSERT(0 <= pos && pos <= 5); } - // num_detections and detection_classes output: int - const int posOut = pos - nbInputs; + // num_detections and detection_classes output: int32_t + const int32_t posOut = pos - nbInputs; if (posOut == 0 || posOut == 3) { return inOut[pos].type == DataType::kINT32 && inOut[pos].format == PluginFormat::kLINEAR; @@ -254,7 +255,7 @@ bool EfficientNMSImplicitTFTRTPlugin::supportsFormatCombination( } void EfficientNMSImplicitTFTRTPlugin::configurePlugin( - const PluginTensorDesc* in, int nbInputs, const PluginTensorDesc* 
out, int nbOutputs) noexcept + const PluginTensorDesc* in, int32_t nbInputs, const PluginTensorDesc* out, int32_t nbOutputs) noexcept { try { @@ -335,18 +336,18 @@ IPluginV2IOExt* EfficientNMSImplicitTFTRTPluginCreator::createPlugin( try { const PluginField* fields = fc->fields; - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "max_output_size_per_class")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); + mParam.numOutputBoxesPerClass = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "max_total_size")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.numOutputBoxes = *(static_cast(fields[i].data)); + mParam.numOutputBoxes = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "iou_threshold")) { @@ -361,12 +362,12 @@ IPluginV2IOExt* EfficientNMSImplicitTFTRTPluginCreator::createPlugin( if (!strcmp(attrName, "pad_per_class")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.padOutputBoxesPerClass = *(static_cast(fields[i].data)); + mParam.padOutputBoxesPerClass = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "clip_boxes")) { PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - mParam.clipBoxes = *(static_cast(fields[i].data)); + mParam.clipBoxes = *(static_cast(fields[i].data)); } } diff --git a/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.h b/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.h index ca797c26..51b09148 100644 --- a/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.h +++ b/plugin/efficientNMSPlugin/tftrt/efficientNMSImplicitTFTRTPlugin.h @@ -47,8 +47,8 @@ class EfficientNMSImplicitTFTRTPlugin : public nvinfer1::IPluginV2IOExt // IPluginV2 methods const char* getPluginType() const noexcept override; const char* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -56,24 +56,25 @@ class EfficientNMSImplicitTFTRTPlugin : public nvinfer1::IPluginV2IOExt void setPluginNamespace(const char* libNamespace) noexcept override; const char* getPluginNamespace() const noexcept override; - nvinfer1::Dims getOutputDimensions(int outputIndex, const nvinfer1::Dims* inputs, int nbInputs) noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, EfficientNMSImplicitTFTRTOutputsDataType outputs, + nvinfer1::Dims getOutputDimensions( + int32_t outputIndex, const nvinfer1::Dims* inputs, int32_t nbInputs) noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; + int32_t enqueue(int32_t batchSize, void const* const* inputs, EfficientNMSImplicitTFTRTOutputsDataType outputs, void* workspace, cudaStream_t stream) noexcept override; // IPluginV2Ext methods - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; nvinfer1::DataType getOutputDataType( - int index, const nvinfer1::DataType* inputType, int nbInputs) const noexcept override; + int32_t index, const nvinfer1::DataType* 
inputType, int32_t nbInputs) const noexcept override; nvinfer1::IPluginV2IOExt* clone() const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; // IPluginV2IOExt methods - bool supportsFormatCombination( - int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const noexcept override; - void configurePlugin(const nvinfer1::PluginTensorDesc* in, int nbInputs, const nvinfer1::PluginTensorDesc* out, - int nbOutputs) noexcept override; + bool supportsFormatCombination(int32_t pos, const nvinfer1::PluginTensorDesc* inOut, int32_t nbInputs, + int32_t nbOutputs) const noexcept override; + void configurePlugin(const nvinfer1::PluginTensorDesc* in, int32_t nbInputs, const nvinfer1::PluginTensorDesc* out, + int32_t nbOutputs) noexcept override; protected: void deserialize(int8_t const* data, size_t length); diff --git a/plugin/embLayerNormPlugin/CustomEmbLayerNormPluginDynamic_PluginConfig.yaml b/plugin/embLayerNormPlugin/CustomEmbLayerNormPluginDynamic_PluginConfig.yaml index 0a3d0b7d..d5f1594a 100644 --- a/plugin/embLayerNormPlugin/CustomEmbLayerNormPluginDynamic_PluginConfig.yaml +++ b/plugin/embLayerNormPlugin/CustomEmbLayerNormPluginDynamic_PluginConfig.yaml @@ -66,13 +66,13 @@ versions: bert_embeddings_token_type_embeddings: 2 bert_embeddings_position_embeddings: 2 attribute_dim_range: - output_fp16: + output_fp16: - min: "=1" - max: "=1" - full_mask: + full_mask: - min: "=1" - max: "=1" - mha_type_id: + mha_type_id: - min: "=1" - max: "=1" bert_embeddings_layernorm_beta: @@ -91,29 +91,29 @@ versions: - min: "=1, =1" - max: "=pinf, =pinf" attribute_options: - output_fp16: + output_fp16: - 0 - 1 - full_mask: + full_mask: - 0 - 1 - mha_type_id: + mha_type_id: - 0 - 1 - 2 - bert_embeddings_layernorm_beta: + bert_embeddings_layernorm_beta: min: "=ninf" max: "=pinf" - bert_embeddings_layernorm_gamma: + bert_embeddings_layernorm_gamma: min: "=ninf" max: "=pinf" - bert_embeddings_word_embeddings: + bert_embeddings_word_embeddings: min: "=ninf" max: "=pinf" - bert_embeddings_token_type_embeddings: + bert_embeddings_token_type_embeddings: min: "=ninf" max: "=pinf" - bert_embeddings_position_embeddings: + bert_embeddings_position_embeddings: min: "=ninf" max: "=pinf" attributes_required: @@ -122,4 +122,33 @@ versions: - bert_embeddings_word_embeddings - bert_embeddings_token_type_embeddings - bert_embeddings_position_embeddings + golden_reference_script: "plugin/embLayerNormPlugin/CustomEmbLayerNormPluginDynamic_PluginReference.py" + abs_tol: 1e-5 + rel_tol: 1e-5 + configs: + config1: + input_types: + token_id: int32 + segment_id: int32 + input_mask: int32 + attribute_options: + output_fp16: + value: 0 + shape: "1" + full_mask: + value: 0 + shape: "1" + mha_type_id: + value: 0 + shape: "1" + bert_embeddings_layernorm_beta: + shape: "128" + bert_embeddings_layernorm_gamma: + shape: "128" + bert_embeddings_word_embeddings: + shape: "100, 128" + bert_embeddings_token_type_embeddings: + shape: "2, 128" + bert_embeddings_position_embeddings: + shape: "20, 128" ... 
diff --git a/plugin/embLayerNormPlugin/embLayerNormPlugin.cpp b/plugin/embLayerNormPlugin/embLayerNormPlugin.cpp index 94693861..ef9738d0 100644 --- a/plugin/embLayerNormPlugin/embLayerNormPlugin.cpp +++ b/plugin/embLayerNormPlugin/embLayerNormPlugin.cpp @@ -329,8 +329,8 @@ int32_t EmbLayerNormPluginDynamic::enqueue(PluginTensorDesc const* inputDesc, Pl auto const wordEmb = static_cast(mWordEmbDev.get()); auto const tokEmb = static_cast(mTokEmbDev.get()); auto const posEmb = static_cast(mPosEmbDev.get()); - status = embSkipLayerNorm(stream, static_cast(mLd), batchSize, S, inputIds, segmentIds, beta, - gamma, wordEmb, posEmb, tokEmb, mWordVocabSize, mTokVocabSize, output); + status = embSkipLayerNorm(stream, static_cast(mLd), batchSize, S, inputIds, segmentIds, + beta, gamma, wordEmb, posEmb, tokEmb, mWordVocabSize, mTokVocabSize, output); if (status != cudaSuccess) { @@ -343,7 +343,7 @@ int32_t EmbLayerNormPluginDynamic::enqueue(PluginTensorDesc const* inputDesc, Pl auto const wordEmb = static_cast(mWordEmbDev.get()); auto const tokEmb = static_cast(mTokEmbDev.get()); auto const posEmb = static_cast(mPosEmbDev.get()); - status = embSkipLayerNorm(stream, static_cast(mLd), batchSize, S, inputIds, segmentIds, beta, + status = embSkipLayerNorm(stream, static_cast(mLd), batchSize, S, inputIds, segmentIds, beta, gamma, wordEmb, posEmb, tokEmb, mWordVocabSize, mTokVocabSize, output); if (status != cudaSuccess) @@ -353,7 +353,7 @@ int32_t EmbLayerNormPluginDynamic::enqueue(PluginTensorDesc const* inputDesc, Pl } else { - gLogError << "Unsupported type error, expected [kHALF,kFLOAT], but received " << static_cast(mType) + gLogError << "Unsupported type error, expected [kHALF,kFLOAT], but received " << static_cast(mType) << std::endl; return STATUS_NOT_SUPPORTED; diff --git a/plugin/embLayerNormPlugin/embLayerNormPlugin.h b/plugin/embLayerNormPlugin/embLayerNormPlugin.h index ba3cc18f..eb21d268 100644 --- a/plugin/embLayerNormPlugin/embLayerNormPlugin.h +++ b/plugin/embLayerNormPlugin/embLayerNormPlugin.h @@ -35,7 +35,7 @@ namespace plugin namespace bert { -int32_t computeMaskIdx(cudaStream_t stream, int32_t const S, int32_t const B, int32_t const* mask, int* maskIdx); +int32_t computeMaskIdx(cudaStream_t stream, int32_t const S, int32_t const B, int32_t const* mask, int32_t* maskIdx); template int32_t embSkipLayerNorm(cudaStream_t stream, int32_t ld, int32_t B, int32_t S, int32_t const* inputIds, diff --git a/plugin/exports-vfc_plugin.map b/plugin/exports-vfc_plugin.map index c81bfe23..70ee8938 100644 --- a/plugin/exports-vfc_plugin.map +++ b/plugin/exports-vfc_plugin.map @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/exports.map b/plugin/exports.map index b0b1d3c5..64de08ba 100644 --- a/plugin/exports.map +++ b/plugin/exports.map @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/plugin/fcPlugin/CustomFCPluginDynamic_PluginConfig.yaml b/plugin/fcPlugin/CustomFCPluginDynamic_PluginConfig.yaml index 44a26e8c..0939c434 100644 --- a/plugin/fcPlugin/CustomFCPluginDynamic_PluginConfig.yaml +++ b/plugin/fcPlugin/CustomFCPluginDynamic_PluginConfig.yaml @@ -57,5 +57,20 @@ versions: - out_dims - type_id - W -... - + golden_io_path: "plugin/fcPlugin/CustomFCPluginDynamic_PluginGoldenIO.json" + golden_reference_script: "plugin/fcPlugin/CustomFCPluginDynamic_PluginReference.py" + abs_tol: 1e-5 + rel_tol: 1e-5 + fp16_atol: 1e-3 + fp16_rtol: 1e-3 + configs: + config1: + input_types: + input: float16 + attribute_options: + "type_id": + value: 1 + shape: "1" + output_types: + output: float16 +... \ No newline at end of file diff --git a/plugin/flattenConcat/flattenConcat.cpp b/plugin/flattenConcat/flattenConcat.cpp index 11e451af..6ecc9f61 100644 --- a/plugin/flattenConcat/flattenConcat.cpp +++ b/plugin/flattenConcat/flattenConcat.cpp @@ -36,15 +36,15 @@ static char const* const kFLATTENCONCAT_PLUGIN_NAME{"FlattenConcat_TRT"}; PluginFieldCollection FlattenConcatPluginCreator::mFC{}; std::vector FlattenConcatPluginCreator::mPluginAttributes; -FlattenConcat::FlattenConcat(int concatAxis, bool ignoreBatch) +FlattenConcat::FlattenConcat(int32_t concatAxis, bool ignoreBatch) : mIgnoreBatch(ignoreBatch) , mConcatAxisID(concatAxis) { PLUGIN_VALIDATE(mConcatAxisID == 1 || mConcatAxisID == 2 || mConcatAxisID == 3); } -FlattenConcat::FlattenConcat(int concatAxis, bool ignoreBatch, int numInputs, int outputConcatAxis, - int const* inputConcatAxis, size_t const* copySize, nvinfer1::Dims const& chwDims) +FlattenConcat::FlattenConcat(int32_t concatAxis, bool ignoreBatch, int32_t numInputs, int32_t outputConcatAxis, + int32_t const* inputConcatAxis, size_t const* copySize, nvinfer1::Dims const& chwDims) : mCopySize(numInputs) , mInputConcatAxis(numInputs) , mIgnoreBatch(ignoreBatch) @@ -64,13 +64,13 @@ FlattenConcat::FlattenConcat(void const* data, size_t length) char const* d = static_cast(data); char const* const a = d; mIgnoreBatch = read(d); - mConcatAxisID = read(d); + mConcatAxisID = read(d); PLUGIN_VALIDATE(mConcatAxisID >= 1 && mConcatAxisID <= 3); - mOutputConcatAxis = read(d); - mNumInputs = read(d); + mOutputConcatAxis = read(d); + mNumInputs = read(d); mInputConcatAxis.resize(mNumInputs); - std::for_each(mInputConcatAxis.begin(), mInputConcatAxis.end(), [&](int& inp) { inp = read(d); }); + std::for_each(mInputConcatAxis.begin(), mInputConcatAxis.end(), [&](int32_t& inp) { inp = read(d); }); mCHW = read(d); @@ -82,12 +82,12 @@ FlattenConcat::FlattenConcat(void const* data, size_t length) FlattenConcat::~FlattenConcat() {} -int FlattenConcat::getNbOutputs() const noexcept +int32_t FlattenConcat::getNbOutputs() const noexcept { return 1; } -Dims FlattenConcat::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims FlattenConcat::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { try { @@ -97,11 +97,11 @@ Dims FlattenConcat::getOutputDimensions(int index, Dims const* inputs, int nbInp mNumInputs = nbInputDims; mCopySize.resize(mNumInputs); mInputConcatAxis.resize(mNumInputs); - int outputConcatAxis = 0; + int32_t outputConcatAxis = 0; - for (int i = 0; i < nbInputDims; ++i) + for (int32_t i = 0; i < nbInputDims; ++i) { - int flattenInput = 0; + int32_t flattenInput = 0; PLUGIN_ASSERT(inputs[i].nbDims == 3); 
if (mConcatAxisID != 1) { @@ -129,20 +129,20 @@ Dims FlattenConcat::getOutputDimensions(int index, Dims const* inputs, int nbInp return Dims{}; } -int FlattenConcat::initialize() noexcept +int32_t FlattenConcat::initialize() noexcept { return STATUS_SUCCESS; } void FlattenConcat::terminate() noexcept {} -size_t FlattenConcat::getWorkspaceSize(int) const noexcept +size_t FlattenConcat::getWorkspaceSize(int32_t) const noexcept { return 0; } -int FlattenConcat::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept +int32_t FlattenConcat::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept { try { @@ -157,11 +157,11 @@ int FlattenConcat::enqueue( } auto* output = static_cast(outputs[0]); - int offset = 0; - for (int i = 0; i < mNumInputs; ++i) + int32_t offset = 0; + for (int32_t i = 0; i < mNumInputs; ++i) { auto const* input = static_cast(inputs[i]); - for (int n = 0; n < numConcats; ++n) + for (int32_t n = 0; n < numConcats; ++n) { auto status = cublasScopy(mCublas, mInputConcatAxis[i], input + n * mInputConcatAxis[i], 1, output + (n * mOutputConcatAxis + offset), 1); @@ -185,7 +185,7 @@ int FlattenConcat::enqueue( size_t FlattenConcat::getSerializationSize() const noexcept { - return sizeof(bool) + sizeof(int) * (3 + mNumInputs) + sizeof(nvinfer1::Dims) + return sizeof(bool) + sizeof(int32_t) * (3 + mNumInputs) + sizeof(nvinfer1::Dims) + (sizeof(decltype(mCopySize)::value_type) * mNumInputs); } @@ -197,12 +197,12 @@ void FlattenConcat::serialize(void* buffer) const noexcept write(d, mConcatAxisID); write(d, mOutputConcatAxis); write(d, mNumInputs); - for (int i = 0; i < mNumInputs; ++i) + for (int32_t i = 0; i < mNumInputs; ++i) { write(d, mInputConcatAxis[i]); } write(d, mCHW); - for (int i = 0; i < mNumInputs; ++i) + for (int32_t i = 0; i < mNumInputs; ++i) { write(d, mCopySize[i]); } @@ -221,13 +221,13 @@ void FlattenConcat::detachFromContext() noexcept {} // Return true if output tensor is broadcast across a batch. bool FlattenConcat::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
-bool FlattenConcat::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool FlattenConcat::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } @@ -251,15 +251,16 @@ char const* FlattenConcat::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index -DataType FlattenConcat::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType FlattenConcat::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { PLUGIN_ASSERT(index < 3); return DataType::kFLOAT; } -void FlattenConcat::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void FlattenConcat::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { try { @@ -269,9 +270,9 @@ void FlattenConcat::configurePlugin(Dims const* inputDims, int nbInputs, Dims co PLUGIN_ASSERT(inputDims[0].nbDims == 3); mInputConcatAxis.resize(mNumInputs); - for (int i = 0; i < nbInputs; ++i) + for (int32_t i = 0; i < nbInputs; ++i) { - int flattenInput = 0; + int32_t flattenInput = 0; PLUGIN_ASSERT(inputDims[i].nbDims == 3); if (mConcatAxisID != 1) { @@ -291,7 +292,7 @@ void FlattenConcat::configurePlugin(Dims const* inputDims, int nbInputs, Dims co } mCopySize.resize(mNumInputs); - for (int i = 0; i < nbInputs; ++i) + for (int32_t i = 0; i < nbInputs; ++i) { mCopySize[i] = inputDims[i].d[0] * inputDims[i].d[1] * inputDims[i].d[2] * sizeof(float); } @@ -374,7 +375,7 @@ IPluginV2Ext* FlattenConcatPluginCreator::createPlugin(char const* name, PluginF if (!strcmp(attrName, "axis")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - mConcatAxisID = *(static_cast(fields[i].data)); + mConcatAxisID = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "ignoreBatch")) { diff --git a/plugin/flattenConcat/flattenConcat.h b/plugin/flattenConcat/flattenConcat.h index eb4dc133..e0bcc008 100644 --- a/plugin/flattenConcat/flattenConcat.h +++ b/plugin/flattenConcat/flattenConcat.h @@ -33,10 +33,10 @@ namespace plugin class FlattenConcat : public IPluginV2Ext { public: - FlattenConcat(int concatAxis, bool ignoreBatch); + FlattenConcat(int32_t concatAxis, bool ignoreBatch); - FlattenConcat(int concatAxis, bool ignoreBatch, int numInputs, int outputConcatAxis, int const* inputConcatAxis, - size_t const* copySize, nvinfer1::Dims const& chwDims); + FlattenConcat(int32_t concatAxis, bool ignoreBatch, int32_t numInputs, int32_t outputConcatAxis, + int32_t const* inputConcatAxis, size_t const* copySize, nvinfer1::Dims const& chwDims); FlattenConcat(void const* data, size_t length); @@ -44,33 +44,34 @@ class FlattenConcat : public IPluginV2Ext FlattenConcat() = delete; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int) const noexcept override; + size_t 
getWorkspaceSize(int32_t) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; bool supportsFormat(DataType type, PluginFormat format) const noexcept override; @@ -98,9 +99,9 @@ class FlattenConcat : public IPluginV2Ext Weights deserializeToDevice(char const*& hostBuffer, size_t count) noexcept; std::vector mCopySize; - std::vector mInputConcatAxis; + std::vector mInputConcatAxis; bool mIgnoreBatch{false}; - int mConcatAxisID{0}, mOutputConcatAxis{0}, mNumInputs{0}; + int32_t mConcatAxisID{0}, mOutputConcatAxis{0}, mNumInputs{0}; nvinfer1::Dims mCHW; std::string mPluginNamespace; cublasHandle_t mCublas{nullptr}; @@ -126,7 +127,7 @@ class FlattenConcatPluginCreator : public nvinfer1::pluginInternal::BaseCreator private: static PluginFieldCollection mFC; bool mIgnoreBatch{false}; - int mConcatAxisID; + int32_t mConcatAxisID; static std::vector mPluginAttributes; }; diff --git a/plugin/geluPlugin/CustomGeluPluginDynamic_PluginConfig.yaml b/plugin/geluPlugin/CustomGeluPluginDynamic_PluginConfig.yaml index 90f32375..3a41b9ed 100644 --- a/plugin/geluPlugin/CustomGeluPluginDynamic_PluginConfig.yaml +++ b/plugin/geluPlugin/CustomGeluPluginDynamic_PluginConfig.yaml @@ -48,4 +48,24 @@ versions: max: "=pinf" attributes_required: - type_id + abs_tol: 1e-2 + rel_tol: 1e-2 + golden_reference_script: "plugin/geluPlugin/CustomGeluPluginDynamic_PluginReference.py" + configs: + config1: + input_types: + input: float32 + attribute_options: + type_id: + value: 0 + bias: + shape: "1, 1, 128, 1, 1" + config2: + input_types: + input: float16 + attribute_options: + type_id: + value: 1 + bias: + shape: "1, 1, 784, 1, 1" ... 
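The geluPlugin.cpp hunk that follows collapses the duplicated float/half branches of enqueue() into a single templated enqueueTyped() helper selected by a switch on the runtime data type. The standalone sketch below shows that dispatch shape with simplified stand-in types; the enum and the half substitute are assumptions for illustration, not the real nvinfer1/CUDA types.

#include <cstdint>
#include <cstdio>

enum class DataType { kFLOAT, kHALF };

// Templated worker: one implementation, instantiated per element type.
template <typename T>
int32_t enqueueTyped(void const* input, void* output, int32_t volume)
{
    // A real plugin would launch a CUDA kernel here; we only report the element size.
    (void) input;
    (void) output;
    std::printf("dispatching %d elements of %zu bytes each\n", volume, sizeof(T));
    return 0;
}

// Thin runtime dispatcher, mirroring the switch added to GeluPluginDynamic::enqueue().
int32_t enqueue(DataType type, void const* input, void* output, int32_t volume)
{
    switch (type)
    {
    case DataType::kFLOAT: return enqueueTyped<float>(input, output, volume);
    case DataType::kHALF: return enqueueTyped<std::uint16_t>(input, output, volume); // stand-in for __half
    default: return -1; // analogous to STATUS_FAILURE
    }
}

int main()
{
    float in[4]{};
    float out[4]{};
    return enqueue(DataType::kFLOAT, in, out, 4);
}

Keeping the type decision in one switch also makes it straightforward to add another precision later with a single new case rather than another copy of the kernel-launch branch.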
diff --git a/plugin/geluPlugin/geluPlugin.cpp b/plugin/geluPlugin/geluPlugin.cpp index 13e53f79..9d1d0cf1 100644 --- a/plugin/geluPlugin/geluPlugin.cpp +++ b/plugin/geluPlugin/geluPlugin.cpp @@ -51,8 +51,8 @@ GeluPluginDynamic::GeluPluginDynamic(const std::string name, const DataType type if (mHasBias) { void* cudaMem{nullptr}; - PLUGIN_CHECK(cudaMalloc(&cudaMem, getWeightsSize(bias, mType))); - PLUGIN_CHECK(cudaMemcpy(cudaMem, bias.values, getWeightsSize(bias, mType), cudaMemcpyHostToDevice)); + PLUGIN_CUASSERT(cudaMalloc(&cudaMem, getWeightsSize(bias, mType))); + PLUGIN_CUASSERT(cudaMemcpy(cudaMem, bias.values, getWeightsSize(bias, mType), cudaMemcpyHostToDevice)); make_cuda_shared(mBiasDev, cudaMem); } } @@ -89,15 +89,39 @@ nvinfer1::IPluginV2DynamicExt* GeluPluginDynamic::clone() const noexcept return nullptr; } -nvinfer1::DimsExprs GeluPluginDynamic::getOutputDimensions( - int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept +nvinfer1::DimsExprs GeluPluginDynamic::getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, + int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { - return inputs[0]; + try + { + PLUGIN_VALIDATE(inputs != nullptr); + PLUGIN_VALIDATE(nbInputs == 1); + PLUGIN_VALIDATE(outputIndex == 0); + return inputs[0]; + } + catch (std::exception const& e) + { + caughtError(e); + } + return DimsExprs{}; } bool GeluPluginDynamic::supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { + try + { + PLUGIN_VALIDATE(inOut != nullptr); + PLUGIN_VALIDATE(nbInputs == 1); + PLUGIN_VALIDATE(nbOutputs == 1); + PLUGIN_VALIDATE(pos >= 0); + PLUGIN_VALIDATE(pos < nbInputs + nbOutputs); + } + catch (std::exception const& e) + { + caughtError(e); + return false; + } PluginTensorDesc const& input = inOut[0]; if (pos == 0) @@ -112,77 +136,93 @@ bool GeluPluginDynamic::supportsFormatCombination( return false; } -void GeluPluginDynamic::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept +void GeluPluginDynamic::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { gLogVerbose << "GeluPluginDynamic configurePlugin\n"; - PLUGIN_ASSERT(mType == in[0].desc.type); + + try + { + PLUGIN_VALIDATE(in != nullptr); + PLUGIN_VALIDATE(nbInputs == 1); + PLUGIN_VALIDATE(mType == in[0].desc.type); + } + catch (std::exception const& e) + { + caughtError(e); + } } -size_t GeluPluginDynamic::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept +size_t GeluPluginDynamic::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } -int GeluPluginDynamic::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, - nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, - cudaStream_t stream) noexcept -{ - int const inputVolume = volume(inputDesc[0].dims); - int status = -1; +template +int32_t GeluPluginDynamic::enqueueTyped( + void const* input_, void* output_, int32_t const inputVolume, cudaStream_t stream) noexcept 
+{ + TDataType const* input = static_cast(input_); + TDataType* output = static_cast(output_); + int32_t const cols = inputVolume / mLd; + int32_t const rows = mLd; - // Our plugin outputs only one tensor - // Launch CUDA kernel wrapper and save its return value - if (mType == DataType::kFLOAT) + if (mHasBias) { - float const* input = static_cast(inputs[0]); - float* output = static_cast(outputs[0]); - if (mHasBias) - { - float const* bias = static_cast(mBiasDev.get()); - int const cols = inputVolume / mLd; - int const rows = mLd; - status = computeGeluBias(output, input, bias, rows, cols, stream); - } - else - { - status = computeGelu(stream, inputVolume, input, output); - } + TDataType const* bias = static_cast(mBiasDev.get()); + return computeGeluBias(output, input, bias, rows, cols, stream); } - else if (mType == DataType::kHALF) + else { - half const* input = static_cast(inputs[0]); - - half* output = static_cast(outputs[0]); + return computeGelu(stream, inputVolume, input, output); + } +} - if (mHasBias) - { - half const* bias = static_cast(mBiasDev.get()); - int const cols = inputVolume / mLd; - int const rows = mLd; - status = computeGeluBias(output, input, bias, rows, cols, stream); - } - else - { - status = computeGelu(stream, inputVolume, input, output); - } +int32_t GeluPluginDynamic::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, + nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, + cudaStream_t stream) noexcept +{ + try + { + PLUGIN_VALIDATE(inputDesc != nullptr); + PLUGIN_VALIDATE(inputs != nullptr); + PLUGIN_VALIDATE(outputs != nullptr); } - else + catch (std::exception const& e) { + caughtError(e); return STATUS_FAILURE; } - return status; + int32_t const inputVolume = volume(inputDesc[0].dims); + + // Our plugin outputs only one tensor. + // Launch CUDA kernel wrapper and save its return value. 
+ switch (mType) + { + case DataType::kFLOAT: return enqueueTyped(inputs[0], outputs[0], inputVolume, stream); + case DataType::kHALF: return enqueueTyped(inputs[0], outputs[0], inputVolume, stream); + default: return STATUS_FAILURE; + } } // IPluginV2Ext Methods nvinfer1::DataType GeluPluginDynamic::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { - PLUGIN_ASSERT(index == 0); - PLUGIN_ASSERT(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); - return inputTypes[0]; + try + { + PLUGIN_VALIDATE(index == 0); + PLUGIN_VALIDATE(inputTypes != nullptr); + PLUGIN_VALIDATE(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); + return inputTypes[0]; + } + catch (std::exception const& e) + { + caughtError(e); + } + return DataType{}; } // IPluginV2 Methods @@ -197,12 +237,12 @@ char const* GeluPluginDynamic::getPluginVersion() const noexcept return kGELU_PLUGIN_VERSION; } -int GeluPluginDynamic::getNbOutputs() const noexcept +int32_t GeluPluginDynamic::getNbOutputs() const noexcept { return 1; } -int GeluPluginDynamic::initialize() noexcept +int32_t GeluPluginDynamic::initialize() noexcept { gLogVerbose << "GeluPluginDynamic initalize\n"; return 0; @@ -243,7 +283,15 @@ void GeluPluginDynamic::destroy() noexcept void GeluPluginDynamic::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } char const* GeluPluginDynamic::getPluginNamespace() const noexcept @@ -283,6 +331,7 @@ IPluginV2* GeluPluginDynamicCreator::createPlugin(char const* name, PluginFieldC try { gLogVerbose << "GeluPluginDynamicCreator createPlugin\n"; + PLUGIN_VALIDATE(fc != nullptr); Weights bias{DataType::kFLOAT, nullptr, 0}; int32_t typeId = -1; @@ -290,13 +339,14 @@ IPluginV2* GeluPluginDynamicCreator::createPlugin(char const* name, PluginFieldC for (int32_t i = 0; i < fc->nbFields; i++) { - std::string field_name(fc->fields[i].name); + PLUGIN_VALIDATE(fc->fields[i].name != nullptr); + std::string fieldName(fc->fields[i].name); - if (field_name.compare("type_id") == 0) + if (fieldName.compare("type_id") == 0) { typeId = *static_cast(fc->fields[i].data); } - if (field_name.compare("bias") == 0) + if (fieldName.compare("bias") == 0) { bias.values = fc->fields[i].data; bias.count = fc->fields[i].length; @@ -337,7 +387,15 @@ IPluginV2* GeluPluginDynamicCreator::deserializePlugin( void GeluPluginDynamicCreator::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } char const* GeluPluginDynamicCreator::getPluginNamespace() const noexcept diff --git a/plugin/geluPlugin/geluPlugin.h b/plugin/geluPlugin/geluPlugin.h index 20e27f28..009797c3 100644 --- a/plugin/geluPlugin/geluPlugin.h +++ b/plugin/geluPlugin/geluPlugin.h @@ -33,15 +33,15 @@ namespace plugin namespace bert { -int computeGelu(cudaStream_t stream, int n, float const* input, float* output); +int32_t computeGelu(cudaStream_t stream, int32_t n, float const* input, float* output); -int computeGelu(cudaStream_t stream, int n, half const* input, half* output); +int32_t computeGelu(cudaStream_t stream, int32_t n, half const* input, half* output); -int 
computeGeluBias( - float* output, float const* input, float const* bias, int const ld, int const cols, cudaStream_t stream); +int32_t computeGeluBias( + float* output, float const* input, float const* bias, int32_t const ld, int32_t const cols, cudaStream_t stream); -int computeGeluBias( - half* output, half const* input, half const* bias, int const ld, int const cols, cudaStream_t stream); +int32_t computeGeluBias( + half* output, half const* input, half const* bias, int32_t const ld, int32_t const cols, cudaStream_t stream); class GeluPluginDynamic : public nvinfer1::IPluginV2DynamicExt { @@ -56,26 +56,26 @@ class GeluPluginDynamic : public nvinfer1::IPluginV2DynamicExt // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* clone() const noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, + nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept override; - int enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; + int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; // IPluginV2Ext Methods nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; // IPluginV2 Methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -84,6 +84,10 @@ class GeluPluginDynamic : public nvinfer1::IPluginV2DynamicExt char const* getPluginNamespace() const noexcept override; private: + // Helper method for enqueue() + template + int32_t enqueueTyped(void const* input, void* output, int32_t const inputVolume, cudaStream_t stream) noexcept; + const std::string mLayerName; std::string mNamespace; diff --git a/plugin/generateDetectionPlugin/generateDetectionPlugin.cpp b/plugin/generateDetectionPlugin/generateDetectionPlugin.cpp index 4a9b0984..574f2ba2 100644 --- 
a/plugin/generateDetectionPlugin/generateDetectionPlugin.cpp +++ b/plugin/generateDetectionPlugin/generateDetectionPlugin.cpp @@ -125,8 +125,8 @@ IPluginV2Ext* GenerateDetectionPluginCreator::deserializePlugin( return nullptr; } -GenerateDetection::GenerateDetection( - int num_classes, int keep_topk, float score_threshold, float iou_threshold, nvinfer1::Dims const& image_size) +GenerateDetection::GenerateDetection(int32_t num_classes, int32_t keep_topk, float score_threshold, float iou_threshold, + nvinfer1::Dims const& image_size) : mNbClasses(num_classes) , mKeepTopK(keep_topk) , mScoreThreshold(score_threshold) @@ -150,12 +150,12 @@ GenerateDetection::GenerateDetection( mType = DataType::kFLOAT; } -int GenerateDetection::getNbOutputs() const noexcept +int32_t GenerateDetection::getNbOutputs() const noexcept { return 1; } -int GenerateDetection::initialize() noexcept +int32_t GenerateDetection::initialize() noexcept { // Init the regWeight [10, 10, 5, 5] mRegWeightDevice = std::make_shared>(4); @@ -163,12 +163,12 @@ int GenerateDetection::initialize() noexcept static_cast(TLTMaskRCNNConfig::DETECTION_REG_WEIGHTS), sizeof(float) * 4, cudaMemcpyHostToDevice)); //@Init the mValidCnt and mDecodedBboxes for max batch size - std::vector tempValidCnt(mMaxBatchSize, mAnchorsCnt); + std::vector tempValidCnt(mMaxBatchSize, mAnchorsCnt); - mValidCnt = std::make_shared>(mMaxBatchSize); + mValidCnt = std::make_shared>(mMaxBatchSize); - PLUGIN_CUASSERT(cudaMemcpy( - mValidCnt->mPtr, static_cast(tempValidCnt.data()), sizeof(int) * mMaxBatchSize, cudaMemcpyHostToDevice)); + PLUGIN_CUASSERT(cudaMemcpy(mValidCnt->mPtr, static_cast(tempValidCnt.data()), + sizeof(int32_t) * mMaxBatchSize, cudaMemcpyHostToDevice)); return 0; } @@ -220,7 +220,7 @@ char const* GenerateDetection::getPluginNamespace() const noexcept size_t GenerateDetection::getSerializationSize() const noexcept { - return sizeof(int) * 2 + sizeof(float) * 2 + sizeof(int) * 2 + sizeof(nvinfer1::Dims); + return sizeof(int32_t) * 2 + sizeof(float) * 2 + sizeof(int32_t) * 2 + sizeof(nvinfer1::Dims); } void GenerateDetection::serialize(void* buffer) const noexcept @@ -267,7 +267,7 @@ void GenerateDetection::deserialize(int8_t const* data, size_t length) mType = DataType::kFLOAT; } -void GenerateDetection::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept +void GenerateDetection::check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept { // classifier_delta_bbox[N, anchors, num_classes*4, 1, 1] // classifier_class[N, anchors, num_classes, 1, 1] @@ -282,13 +282,13 @@ void GenerateDetection::check_valid_inputs(nvinfer1::Dims const* inputs, int nbI PLUGIN_ASSERT(inputs[2].nbDims == 2 && inputs[2].d[1] == 4); } -size_t GenerateDetection::getWorkspaceSize(int batch_size) const noexcept +size_t GenerateDetection::getWorkspaceSize(int32_t batch_size) const noexcept { RefineDetectionWorkSpace refine(batch_size, mAnchorsCnt, mParam, mType); return refine.totalSize; } -Dims GenerateDetection::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims GenerateDetection::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { check_valid_inputs(inputs, nbInputDims); @@ -322,7 +322,7 @@ int32_t GenerateDetection::enqueue( } DataType GenerateDetection::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only DataType::kFLOAT 
is acceptable by the plugin layer return DataType::kFLOAT; @@ -330,21 +330,21 @@ DataType GenerateDetection::getOutputDataType( // Return true if output tensor is broadcast across a batch. bool GenerateDetection::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool GenerateDetection::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool GenerateDetection::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. -void GenerateDetection::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, - DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept +void GenerateDetection::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { check_valid_inputs(inputDims, nbInputs); PLUGIN_ASSERT(inputDims[0].d[0] == inputDims[1].d[0] && inputDims[1].d[0] == inputDims[2].d[0]); diff --git a/plugin/generateDetectionPlugin/generateDetectionPlugin.h b/plugin/generateDetectionPlugin/generateDetectionPlugin.h index 30825328..75dd50f3 100644 --- a/plugin/generateDetectionPlugin/generateDetectionPlugin.h +++ b/plugin/generateDetectionPlugin/generateDetectionPlugin.h @@ -36,24 +36,24 @@ namespace plugin class GenerateDetection : public IPluginV2Ext { public: - GenerateDetection( - int num_classes, int keep_topk, float score_threshold, float iou_threshold, nvinfer1::Dims const& image_size); + GenerateDetection(int32_t num_classes, int32_t keep_topk, float score_threshold, float iou_threshold, + nvinfer1::Dims const& image_size); GenerateDetection(void const* data, size_t length); ~GenerateDetection() noexcept override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; void destroy() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; @@ -74,25 +74,26 @@ class GenerateDetection : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) 
const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: void deserialize(int8_t const* data, size_t length); - void check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept; + void check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept; int32_t mBackgroundLabel{}; int32_t mNbClasses{}; diff --git a/plugin/gridAnchorPlugin/gridAnchorPlugin.cpp b/plugin/gridAnchorPlugin/gridAnchorPlugin.cpp index 14cbf260..33afca2d 100644 --- a/plugin/gridAnchorPlugin/gridAnchorPlugin.cpp +++ b/plugin/gridAnchorPlugin/gridAnchorPlugin.cpp @@ -38,28 +38,28 @@ char const* const kGRID_ANCHOR_PLUGIN_VERSION = "1"; PluginFieldCollection GridAnchorBasePluginCreator::mFC{}; std::vector GridAnchorBasePluginCreator::mPluginAttributes; -GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, int numLayers, char const* name) +GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, int32_t numLayers, char const* name) : mPluginName(name) , mNumLayers(numLayers) { - PLUGIN_CUASSERT(cudaMallocHost((void**) &mNumPriors, mNumLayers * sizeof(int))); + PLUGIN_CUASSERT(cudaMallocHost((void**) &mNumPriors, mNumLayers * sizeof(int32_t))); PLUGIN_CUASSERT(cudaMallocHost((void**) &mDeviceWidths, mNumLayers * sizeof(Weights))); PLUGIN_CUASSERT(cudaMallocHost((void**) &mDeviceHeights, mNumLayers * sizeof(Weights))); mParam.resize(mNumLayers); - for (int id = 0; id < mNumLayers; id++) + for (int32_t id = 0; id < mNumLayers; id++) { mParam[id] = paramIn[id]; PLUGIN_VALIDATE(mParam[id].numAspectRatios >= 0 && mParam[id].aspectRatios != nullptr); mParam[id].aspectRatios = (float*) malloc(sizeof(float) * mParam[id].numAspectRatios); - for (int i = 0; i < paramIn[id].numAspectRatios; ++i) + for (int32_t i = 0; i < paramIn[id].numAspectRatios; ++i) { mParam[id].aspectRatios[i] = paramIn[id].aspectRatios[i]; } - for (int i = 0; i < 4; ++i) + for (int32_t i = 0; i < 4; ++i) { mParam[id].variance[i] = paramIn[id].variance[i]; } @@ -67,7 +67,7 @@ GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, in std::vector tmpScales(mNumLayers + 1); // Calculate the scales of SSD model for each layer - for (int i = 0; i < mNumLayers; i++) + for (int32_t i = 0; i < mNumLayers; i++) { tmpScales[i] = (mParam[id].minSize + (mParam[id].maxSize - mParam[id].minSize) * id / (mNumLayers - 1)); } @@ -82,7 +82,7 @@ GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, in // The first layer is different if (id == 0) { - for (int i = 0; i < mParam[id].numAspectRatios; i++) + for (int32_t i = 0; i < mParam[id].numAspectRatios; i++) { aspect_ratios.push_back(mParam[id].aspectRatios[i]); scales.push_back(scale0[i]); @@ -92,7 +92,7 @@ 
GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, in else { - for (int i = 0; i < mParam[id].numAspectRatios; i++) + for (int32_t i = 0; i < mParam[id].numAspectRatios; i++) { aspect_ratios.push_back(mParam[id].aspectRatios[i]); } @@ -100,7 +100,7 @@ GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, in aspect_ratios.push_back(1.0); // scales - for (int i = 0; i < mParam[id].numAspectRatios; i++) + for (int32_t i = 0; i < mParam[id].numAspectRatios; i++) { scales.push_back(tmpScales[id]); } @@ -115,7 +115,7 @@ GridAnchorGenerator::GridAnchorGenerator(GridAnchorParameters const* paramIn, in std::vector tmpWidths; std::vector tmpHeights; // Calculate the width and height of the prior boxes - for (int i = 0; i < mNumPriors[id]; i++) + for (int32_t i = 0; i < mNumPriors[id]; i++) { float sqrt_AR = sqrt(aspect_ratios[i]); tmpWidths.push_back(scales[i] * sqrt_AR); @@ -131,30 +131,30 @@ GridAnchorGenerator::GridAnchorGenerator(void const* data, size_t length, char c : mPluginName(name) { char const *d = reinterpret_cast(data), *a = d; - mNumLayers = read(d); - PLUGIN_CUASSERT(cudaMallocHost((void**) &mNumPriors, mNumLayers * sizeof(int))); + mNumLayers = read(d); + PLUGIN_CUASSERT(cudaMallocHost((void**) &mNumPriors, mNumLayers * sizeof(int32_t))); PLUGIN_CUASSERT(cudaMallocHost((void**) &mDeviceWidths, mNumLayers * sizeof(Weights))); PLUGIN_CUASSERT(cudaMallocHost((void**) &mDeviceHeights, mNumLayers * sizeof(Weights))); mParam.resize(mNumLayers); - for (int id = 0; id < mNumLayers; id++) + for (int32_t id = 0; id < mNumLayers; id++) { // we have to deserialize GridAnchorParameters by hand mParam[id].minSize = read(d); mParam[id].maxSize = read(d); - mParam[id].numAspectRatios = read(d); + mParam[id].numAspectRatios = read(d); mParam[id].aspectRatios = (float*) malloc(sizeof(float) * mParam[id].numAspectRatios); - for (int i = 0; i < mParam[id].numAspectRatios; ++i) + for (int32_t i = 0; i < mParam[id].numAspectRatios; ++i) { mParam[id].aspectRatios[i] = read(d); } - mParam[id].H = read(d); - mParam[id].W = read(d); - for (int i = 0; i < 4; ++i) + mParam[id].H = read(d); + mParam[id].W = read(d); + for (int32_t i = 0; i < 4; ++i) { mParam[id].variance[i] = read(d); } - mNumPriors[id] = read(d); + mNumPriors[id] = read(d); mDeviceWidths[id] = deserializeToDevice(d, mNumPriors[id]); mDeviceHeights[id] = deserializeToDevice(d, mNumPriors[id]); } @@ -164,7 +164,7 @@ GridAnchorGenerator::GridAnchorGenerator(void const* data, size_t length, char c GridAnchorGenerator::~GridAnchorGenerator() { - for (int id = 0; id < mNumLayers; id++) + for (int32_t id = 0; id < mNumLayers; id++) { PLUGIN_CUERROR(cudaFree(const_cast(mDeviceWidths[id].values))); PLUGIN_CUERROR(cudaFree(const_cast(mDeviceHeights[id].values))); @@ -175,12 +175,12 @@ GridAnchorGenerator::~GridAnchorGenerator() PLUGIN_CUERROR(cudaFreeHost(mDeviceHeights)); } -int GridAnchorGenerator::getNbOutputs() const noexcept +int32_t GridAnchorGenerator::getNbOutputs() const noexcept { return mNumLayers; } -Dims GridAnchorGenerator::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims GridAnchorGenerator::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { // Particularity of the PriorBox layer: no batchSize dimension needed // 2 channels. First channel stores the mean of each prior coordinate. 
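For reference, the per-layer scale and prior-box size math that the GridAnchorGenerator constructor above serializes can be sketched outside the plugin. The NumPy sketch below is illustrative only: the helper name ssd_prior_box_sizes and its parameters are assumptions, not part of the plugin API; it uses the creator's default 0.2–0.95 scale range, omits the first layer's special-cased scales, and assumes heights follow the usual SSD scale / sqrt(aspect_ratio) convention used by the unchanged lines of the constructor.

import numpy as np

def ssd_prior_box_sizes(min_size, max_size, aspect_ratios, num_layers, layer_id):
    # Linearly interpolate this layer's scale between min_size and max_size,
    # matching tmpScales[i] = minSize + (maxSize - minSize) * id / (numLayers - 1).
    scale = min_size + (max_size - min_size) * layer_id / (num_layers - 1)
    widths, heights = [], []
    for ar in aspect_ratios:
        sqrt_ar = np.sqrt(ar)
        widths.append(scale * sqrt_ar)   # wider boxes for aspect ratios > 1
        heights.append(scale / sqrt_ar)  # taller boxes for aspect ratios < 1
    return np.asarray(widths), np.asarray(heights)

# Example: prior-box sizes for the last of 6 layers with three aspect ratios.
w, h = ssd_prior_box_sizes(0.2, 0.95, [1.0, 2.0, 0.5], num_layers=6, layer_id=5)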
@@ -188,23 +188,23 @@ Dims GridAnchorGenerator::getOutputDimensions(int index, Dims const* inputs, int return Dims3(2, mParam[index].H * mParam[index].W * mNumPriors[index] * 4, 1); } -int GridAnchorGenerator::initialize() noexcept +int32_t GridAnchorGenerator::initialize() noexcept { return STATUS_SUCCESS; } void GridAnchorGenerator::terminate() noexcept {} -size_t GridAnchorGenerator::getWorkspaceSize(int maxBatchSize) const noexcept +size_t GridAnchorGenerator::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return 0; } -int GridAnchorGenerator::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t GridAnchorGenerator::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { // Generate prior boxes for each layer - for (int id = 0; id < mNumLayers; id++) + for (int32_t id = 0; id < mNumLayers; id++) { void* outputData = outputs[id]; pluginStatus_t status = anchorGridInference( @@ -219,10 +219,10 @@ int GridAnchorGenerator::enqueue( size_t GridAnchorGenerator::getSerializationSize() const noexcept { - size_t sum = sizeof(int); // mNumLayers - for (int i = 0; i < mNumLayers; i++) + size_t sum = sizeof(int32_t); // mNumLayers + for (int32_t i = 0; i < mNumLayers; i++) { - sum += 4 * sizeof(int); // mNumPriors, mParam[i].{numAspectRatios, H, W} + sum += 4 * sizeof(int32_t); // mNumPriors, mParam[i].{numAspectRatios, H, W} sum += (6 + mParam[i].numAspectRatios) * sizeof(float); // mParam[i].{minSize, maxSize, aspectRatios, variance[4]} sum += mDeviceWidths[i].count * sizeof(float); @@ -235,19 +235,19 @@ void GridAnchorGenerator::serialize(void* buffer) const noexcept { char *d = reinterpret_cast(buffer), *a = d; write(d, mNumLayers); - for (int id = 0; id < mNumLayers; id++) + for (int32_t id = 0; id < mNumLayers; id++) { // we have to serialize GridAnchorParameters by hand write(d, mParam[id].minSize); write(d, mParam[id].maxSize); write(d, mParam[id].numAspectRatios); - for (int i = 0; i < mParam[id].numAspectRatios; ++i) + for (int32_t i = 0; i < mParam[id].numAspectRatios; ++i) { write(d, mParam[id].aspectRatios[i]); } write(d, mParam[id].H); write(d, mParam[id].W); - for (int i = 0; i < 4; ++i) + for (int32_t i = 0; i < 4; ++i) { write(d, mParam[id].variance[i]); } @@ -309,7 +309,7 @@ char const* GridAnchorGenerator::getPluginNamespace() const noexcept #include // Return the DataType of the plugin output at the requested index DataType GridAnchorGenerator::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { PLUGIN_ASSERT(index < mNumLayers); return DataType::kFLOAT; @@ -317,21 +317,21 @@ DataType GridAnchorGenerator::getOutputDataType( // Return true if output tensor is broadcast across a batch. bool GridAnchorGenerator::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool GridAnchorGenerator::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool GridAnchorGenerator::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. 
-void GridAnchorGenerator::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, - DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept +void GridAnchorGenerator::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(nbOutputs == mNumLayers); PLUGIN_ASSERT(outputDims[0].nbDims == 3); @@ -400,20 +400,20 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin try { float minScale = 0.2F, maxScale = 0.95F; - int numLayers = 6; + int32_t numLayers = 6; std::vector aspectRatios; - std::vector fMapShapes; + std::vector fMapShapes; std::vector layerVariances; PluginField const* fields = fc->fields; bool const isFMapRect = (kGRID_ANCHOR_PLUGIN_NAMES[1] == mPluginName); - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "numLayers")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - numLayers = static_cast(*(static_cast(fields[i].data))); + numLayers = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "minSize")) { @@ -428,10 +428,10 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin else if (!strcmp(attrName, "variance")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kFLOAT32); - int size = fields[i].length; + int32_t size = fields[i].length; layerVariances.reserve(size); auto const* lVar = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + for (int32_t j = 0; j < size; j++) { layerVariances.push_back(*lVar); lVar++; @@ -440,10 +440,10 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin else if (!strcmp(attrName, "aspectRatios")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kFLOAT32); - int size = fields[i].length; + int32_t size = fields[i].length; aspectRatios.reserve(size); auto const* aR = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + for (int32_t j = 0; j < size; j++) { aspectRatios.push_back(*aR); aR++; @@ -452,11 +452,11 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin else if (!strcmp(attrName, "featureMapShapes")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - int size = fields[i].length; + int32_t size = fields[i].length; PLUGIN_VALIDATE(!isFMapRect || (size % 2 == 0)); fMapShapes.reserve(size); - int const* fMap = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + int32_t const* fMap = static_cast(fields[i].data); + for (int32_t j = 0; j < size; j++) { fMapShapes.push_back(*fMap); fMap++; @@ -468,13 +468,13 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin std::vector firstLayerAspectRatios; PLUGIN_VALIDATE(numLayers > 0); - int const numExpectedLayers = static_cast(fMapShapes.size()) >> (isFMapRect ? 1 : 0); + int32_t const numExpectedLayers = static_cast(fMapShapes.size()) >> (isFMapRect ? 
1 : 0); PLUGIN_VALIDATE(numExpectedLayers == numLayers); - int numFirstLayerARs = 3; + int32_t numFirstLayerARs = 3; // First layer only has the first 3 aspect ratios from aspectRatios firstLayerAspectRatios.reserve(numFirstLayerARs); - for (int i = 0; i < numFirstLayerARs; ++i) + for (int32_t i = 0; i < numFirstLayerARs; ++i) { firstLayerAspectRatios.push_back(aspectRatios[i]); } @@ -482,21 +482,22 @@ IPluginV2Ext* GridAnchorBasePluginCreator::createPlugin(char const* name, Plugin std::vector boxParams(numLayers); // One set of box parameters for one layer - for (int i = 0; i < numLayers; i++) + for (int32_t i = 0; i < numLayers; i++) { - int hOffset = (isFMapRect ? i * 2 : i); - int wOffset = (isFMapRect ? i * 2 + 1 : i); + int32_t hOffset = (isFMapRect ? i * 2 : i); + int32_t wOffset = (isFMapRect ? i * 2 + 1 : i); // Only the first layer is different if (i == 0) { - boxParams[i] = {minScale, maxScale, firstLayerAspectRatios.data(), (int) firstLayerAspectRatios.size(), - fMapShapes[hOffset], fMapShapes[wOffset], + boxParams[i] = {minScale, maxScale, firstLayerAspectRatios.data(), + (int32_t) firstLayerAspectRatios.size(), fMapShapes[hOffset], fMapShapes[wOffset], {layerVariances[0], layerVariances[1], layerVariances[2], layerVariances[3]}}; } else { - boxParams[i] = {minScale, maxScale, aspectRatios.data(), (int) aspectRatios.size(), fMapShapes[hOffset], - fMapShapes[wOffset], {layerVariances[0], layerVariances[1], layerVariances[2], layerVariances[3]}}; + boxParams[i] = {minScale, maxScale, aspectRatios.data(), (int32_t) aspectRatios.size(), + fMapShapes[hOffset], fMapShapes[wOffset], + {layerVariances[0], layerVariances[1], layerVariances[2], layerVariances[3]}}; } } diff --git a/plugin/gridAnchorPlugin/gridAnchorPlugin.h b/plugin/gridAnchorPlugin/gridAnchorPlugin.h index 496de7c7..b0e1fc16 100644 --- a/plugin/gridAnchorPlugin/gridAnchorPlugin.h +++ b/plugin/gridAnchorPlugin/gridAnchorPlugin.h @@ -31,23 +31,23 @@ namespace plugin class GridAnchorGenerator : public IPluginV2Ext { public: - GridAnchorGenerator(GridAnchorParameters const* param, int numLayers, char const* version); + GridAnchorGenerator(GridAnchorParameters const* param, int32_t numLayers, char const* version); GridAnchorGenerator(void const* data, size_t length, char const* version); ~GridAnchorGenerator() override; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -68,19 +68,20 @@ class GridAnchorGenerator : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool 
isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; @@ -94,9 +95,9 @@ class GridAnchorGenerator : public IPluginV2Ext Weights deserializeToDevice(char const*& hostBuffer, size_t count) noexcept; - int mNumLayers; + int32_t mNumLayers; std::vector mParam; - int* mNumPriors; + int32_t* mNumPriors; Weights *mDeviceWidths, *mDeviceHeights; std::string mPluginNamespace; }; diff --git a/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginConfig.yaml b/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginConfig.yaml index 76574792..bec760a5 100644 --- a/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginConfig.yaml +++ b/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginConfig.yaml @@ -3,6 +3,37 @@ name: GroupNormalizationPlugin interface: "IPluginV2DynamicExt" versions: "1": + inputs: + - input + - scale + - bias + outputs: + - output + input_dims: + input: 4 + scale: 1 + bias: 1 + input_dim_constraints: + - "input_1 MULTIPLE_OF num_groups_0" + - "scale_0 == input_1" + - "bias_0 == scale_0" + input_dim_range: + input: + min: "=1, =1, =1, =1" + max: "=pinf, =pinf, =pinf, =pinf" + scale: + min: "=1" + max: "=pinf" + bias: + min: "=1" + max: "=pinf" + supported_input_types: + - combination1: + input: float32 + scale: float32 + bias: float32 + output_dims: + output: "input_0, input_1, input_2, input_3" attributes: - eps - num_groups @@ -12,12 +43,53 @@ versions: attribute_length: eps: 1 num_groups: 1 + attribute_dim_range: + eps: + min: "=1" + max: "=1" + num_groups: + min: "=1" + max: "=1" attribute_options: eps: min: "0" max: "=pinf" num_groups: - min: "0" + min: "=1" max: "=pinf" attributes_required: [] + abs_tol: 1e-2 + rel_tol: 1e-2 + golden_reference_script: "plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginReference.py" + configs: + config1: + input_types: + input: float32 + scale: float32 + bias: float32 + attribute_options: + eps: + value: 0.0001 + num_groups: + value: 1 + config2: + input_types: + input: float32 + scale: float32 + bias: float32 + attribute_options: + eps: + value: 0.001 + num_groups: + value: 2 + config3: + input_types: + input: float32 + scale: float32 + bias: float32 + attribute_options: + eps: + value: 0.01 + num_groups: + value: 3 ... 
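The next file adds a NumPy golden reference for GroupNormalization. As a quick, self-contained sanity check of the same math, the sketch below compares the vectorized reshape-based normalization against an explicit per-group loop, using the abs_tol/rel_tol of 1e-2 declared in the YAML above. The shapes, random seed, and helper names are illustrative assumptions and are not part of the patch.

import numpy as np

def group_norm_ref(x, scale, bias, num_groups, eps):
    # Vectorized form, mirroring the reference script: normalize each
    # (batch, group) slice, then apply per-channel scale and bias.
    B, C, H, W = x.shape
    g = x.reshape(B * num_groups, -1)
    g = (g - g.mean(axis=-1, keepdims=True)) / np.sqrt(eps + g.var(axis=-1, keepdims=True))
    g = g.reshape(B, C, H, W)
    return bias.reshape(1, C, 1, 1) + scale.reshape(1, C, 1, 1) * g

def group_norm_loop(x, scale, bias, num_groups, eps):
    # Explicit per-group loop, used only as an independent cross-check.
    B, C, H, W = x.shape
    out = np.empty_like(x)
    cpg = C // num_groups
    for b in range(B):
        for gi in range(num_groups):
            sl = x[b, gi * cpg:(gi + 1) * cpg]
            out[b, gi * cpg:(gi + 1) * cpg] = (sl - sl.mean()) / np.sqrt(eps + sl.var())
    return bias.reshape(1, C, 1, 1) + scale.reshape(1, C, 1, 1) * out

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 6, 4, 4)).astype(np.float32)
scale = rng.standard_normal(6).astype(np.float32)
bias = rng.standard_normal(6).astype(np.float32)
# Tolerances match abs_tol / rel_tol from the plugin config.
assert np.allclose(group_norm_ref(x, scale, bias, 3, 1e-4),
                   group_norm_loop(x, scale, bias, 3, 1e-4), rtol=1e-2, atol=1e-2)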
diff --git a/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginReference.py b/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginReference.py new file mode 100644 index 00000000..0e7fb510 --- /dev/null +++ b/plugin/groupNormalizationPlugin/GroupNormalizationPlugin_PluginReference.py @@ -0,0 +1,27 @@ +import numpy as np + +def ref(inputs, attributes, version = "1"): + assert version == "1" + num_groups = attributes["num_groups"][0] + epsilon = attributes["eps"][0] + input = inputs["input"] + bias = inputs["bias"] + scale = inputs["scale"] + output = input.copy() + + assert len(input.shape) == 4 + B, C, H, W = input.shape + + # Groups are a subdivision of the channel dimension. + assert C % num_groups == 0 + + # Normalize every group. + output = output.reshape((B * num_groups, -1)) + output -= np.mean(output, axis=-1, keepdims=True) + output /= np.sqrt(epsilon + np.var(output, axis=-1, keepdims=True)) + + # Apply per-channel scale and bias. + output = output.reshape(input.shape) + output = bias.reshape(1, C, 1, 1) + scale.reshape(1, C, 1, 1) * output + + return {"output": output} diff --git a/plugin/groupNormalizationPlugin/groupNormalizationPlugin.cpp b/plugin/groupNormalizationPlugin/groupNormalizationPlugin.cpp index 44e0b881..d68f4018 100644 --- a/plugin/groupNormalizationPlugin/groupNormalizationPlugin.cpp +++ b/plugin/groupNormalizationPlugin/groupNormalizationPlugin.cpp @@ -17,6 +17,7 @@ #include "groupNormalizationPlugin.h" #include "common/dimsHelpers.h" +#include "common/serialize.hpp" #include #include @@ -25,16 +26,6 @@ using namespace nvinfer1; using nvinfer1::plugin::GroupNormalizationPlugin; using nvinfer1::plugin::GroupNormalizationPluginCreator; -#define PLUGIN_CHECK_CUDNN(call) \ - do \ - { \ - cudnnStatus_t status = call; \ - if (status != CUDNN_STATUS_SUCCESS) \ - { \ - return status; \ - } \ - } while (0) - namespace { constexpr char const* kGROUP_NORM_VERSION{"1"}; @@ -47,33 +38,17 @@ std::vector GroupNormalizationPluginCreator::mPluginAttri REGISTER_TENSORRT_PLUGIN(GroupNormalizationPluginCreator); -GroupNormalizationPlugin::GroupNormalizationPlugin(float epsilon, int nbGroups) +GroupNormalizationPlugin::GroupNormalizationPlugin(float epsilon, int32_t nbGroups) : mEpsilon(epsilon) , mNbGroups(nbGroups) { PLUGIN_VALIDATE(mEpsilon > 0.0F); - // Number of groups should be positive PLUGIN_VALIDATE(mNbGroups > 0); } -int GroupNormalizationPlugin::initialize() noexcept +int32_t GroupNormalizationPlugin::initialize() noexcept { - auto allocScaleBias = [this](std::shared_ptr>& buf, float value) { - PLUGIN_VALIDATE(mNbScaleBias > 0); - if (!buf || !buf->mPtr || buf->mSize != mNbScaleBias) - { - // Allocate device memory. - buf = std::make_shared>(mNbScaleBias); - - // Initialize values. 
- std::vector const values(mNbScaleBias, value); - PLUGIN_CUASSERT(cudaMemcpy(buf->mPtr, values.data(), sizeof(float) * mNbScaleBias, cudaMemcpyHostToDevice)); - } - }; - - allocScaleBias(mBnScales, 1.F); - allocScaleBias(mBnBias, 0.F); - return 0; + return STATUS_SUCCESS; } GroupNormalizationPlugin::GroupNormalizationPlugin(void const* data, size_t length) @@ -81,7 +56,6 @@ GroupNormalizationPlugin::GroupNormalizationPlugin(void const* data, size_t leng // Deserialize in the same order as serialization deserialize_value(&data, &length, &mEpsilon); deserialize_value(&data, &length, &mNbGroups); - deserialize_value(&data, &length, &mNbScaleBias); } char const* GroupNormalizationPlugin::getPluginType() const noexcept @@ -94,113 +68,153 @@ char const* GroupNormalizationPlugin::getPluginVersion() const noexcept return kGROUP_NORM_VERSION; } -int GroupNormalizationPlugin::getNbOutputs() const noexcept +int32_t GroupNormalizationPlugin::getNbOutputs() const noexcept { return 1; } nvinfer1::DimsExprs GroupNormalizationPlugin::getOutputDimensions( - int index, nvinfer1::DimsExprs const* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept + int32_t index, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { - // Input (from previous layer), scale and bias are the three inputs to the plugin. - PLUGIN_ASSERT(nbInputs == 3); - PLUGIN_ASSERT(index == 0); - nvinfer1::DimsExprs output(inputs[0]); - return output; + try + { + // Input (from previous layer), scale and bias are the three inputs to the plugin. + PLUGIN_VALIDATE(nbInputs == 3); + PLUGIN_VALIDATE(index == 0); + return inputs[0]; + } + catch (std::exception const& e) + { + caughtError(e); + return DimsExprs{}; + } } void GroupNormalizationPlugin::attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept { - PLUGIN_ASSERT(cudnnContext); - _cudnn_handle = cudnnContext; - PLUGIN_CUDNNASSERT(cudnnCreateTensorDescriptor(&desc)); - PLUGIN_CUDNNASSERT(cudnnCreateTensorDescriptor(&bnDesc)); + try + { + PLUGIN_VALIDATE(cudnnContext); + mCudnnHandle = cudnnContext; + PLUGIN_CUDNNASSERT(cudnnCreateTensorDescriptor(&mTensorDesc)); + PLUGIN_CUDNNASSERT(cudnnCreateTensorDescriptor(&mBNTensorDesc)); + } + catch (std::exception const& e) + { + caughtError(e); + } } // Detach the plugin object from its execution context. 
void GroupNormalizationPlugin::detachFromContext() noexcept { - PLUGIN_CUDNNASSERT(cudnnDestroyTensorDescriptor(desc)); - PLUGIN_CUDNNASSERT(cudnnDestroyTensorDescriptor(bnDesc)); + try + { + PLUGIN_CUDNNASSERT(cudnnDestroyTensorDescriptor(mTensorDesc)); + PLUGIN_CUDNNASSERT(cudnnDestroyTensorDescriptor(mBNTensorDesc)); + mCudnnHandle = nullptr; + } + catch (std::exception const& e) + { + caughtError(e); + } } -int GroupNormalizationPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, +int32_t GroupNormalizationPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { - // Get the input dimensions - nvinfer1::Dims input_dims = inputDesc[0].dims; - int batchSize = input_dims.d[0]; - int nbChannels = input_dims.d[1]; - - // Calculate size of each group - int groupSize = nbChannels / mNbGroups; - - mChannelVolume = pluginInternal::volume(input_dims, /*start*/ 2, /*stop*/ inputDesc[0].dims.nbDims); - - PLUGIN_CHECK_CUDNN(cudnnSetTensor4dDescriptor(desc, // descriptor - CUDNN_TENSOR_NCHW, // tensor format - CUDNN_DATA_FLOAT, // type - 1, // Batchsize - batchSize * mNbGroups, // Channels - groupSize, // Height - mChannelVolume // Width - )); + try + { + PLUGIN_VALIDATE(inputDesc != nullptr); + PLUGIN_VALIDATE(inputs != nullptr); + PLUGIN_VALIDATE(outputs != nullptr); + PLUGIN_VALIDATE(mBnScales != nullptr && mBnScales->mPtr != nullptr); + PLUGIN_VALIDATE(mBnBias != nullptr && mBnBias->mPtr != nullptr); + PLUGIN_VALIDATE(mCudnnHandle != nullptr); + PLUGIN_VALIDATE(mTensorDesc != nullptr); + PLUGIN_VALIDATE(mBNTensorDesc != nullptr); + } + catch (std::exception const& e) + { + caughtError(e); + return STATUS_FAILURE; + } - PLUGIN_CHECK_CUDNN(cudnnDeriveBNTensorDescriptor(bnDesc, desc, CUDNN_BATCHNORM_SPATIAL)); - PLUGIN_CHECK_CUDNN(cudnnSetStream(_cudnn_handle, stream)); - - // Reshape the data according in the cudnnSetTensor4dDescriptor. - PLUGIN_ASSERT(mBnScales && mBnScales->mPtr); - PLUGIN_ASSERT(mBnBias && mBnBias->mPtr); - float a = 1.F; - float b = 0.F; - PLUGIN_CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(_cudnn_handle, // handle - CUDNN_BATCHNORM_SPATIAL, // BatchNormMode_t, try also non persistent - &a, // - &b, // - desc, // in/out descriptor - inputs[0], // input - desc, // in/out descriptor - outputs[0], // output - bnDesc, // - mBnScales->mPtr, // 1 - mBnBias->mPtr, // 0 - 0.0, // exponential average factor - nullptr, // resultRunningMean - nullptr, // resultRunningVar - mEpsilon, // eps - nullptr, // resultSaveMean - nullptr // resultSaveInvVar + PLUGIN_CHECK_CUDNN(cudnnSetStream(mCudnnHandle, stream)); + + // The tensor descriptors were set up in configurePlugin() to make Batch Normalization actually + // perform Group Normalization. This was done by setting the tensor descriptor shape to + // (1, batch*num_groups, channels_per_group, volume_of_spatial_dims). + // cudnnBatchNorm will normalize over the last two dimensions. 
+ float const one = 1.F; + float const zero = 0.F; + PLUGIN_CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(mCudnnHandle, // handle + CUDNN_BATCHNORM_SPATIAL, // BatchNormMode_t, try also non persistent + &one, // + &zero, // + mTensorDesc, // in/out descriptor + inputs[0], // input + mTensorDesc, // in/out descriptor + outputs[0], // output + mBNTensorDesc, // + mBnScales->mPtr, // 1 + mBnBias->mPtr, // 0 + 0.0, // exponential average factor + nullptr, // resultRunningMean + nullptr, // resultRunningVar + mEpsilon, // eps + nullptr, // resultSaveMean + nullptr // resultSaveInvVar )); - float* output = static_cast(outputs[0]); + // Apply an additional scale and bias on each channel. + nvinfer1::Dims inputDims = inputDesc[0].dims; + int32_t batchSize = inputDims.d[0]; + int32_t nbChannels = inputDims.d[1]; + auto* output = static_cast(outputs[0]); return scaleShiftChannelsInplace(output, batchSize, nbChannels, mChannelVolume, static_cast(inputs[2]), static_cast(inputs[1]), stream); // mBetaDev, mGammaDev, } size_t GroupNormalizationPlugin::getSerializationSize() const noexcept { - return sizeof(mNbGroups) + sizeof(mEpsilon) + sizeof(mNbScaleBias); + return sizeof(mNbGroups) + sizeof(mEpsilon); } void GroupNormalizationPlugin::serialize(void* buffer) const noexcept { + PLUGIN_ASSERT(buffer != nullptr); + auto* const start = reinterpret_cast(buffer); serialize_value(&buffer, mEpsilon); serialize_value(&buffer, mNbGroups); - serialize_value(&buffer, mNbScaleBias); + PLUGIN_ASSERT(start + getSerializationSize() == reinterpret_cast(buffer)); } bool GroupNormalizationPlugin::supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { - PLUGIN_ASSERT(inOut && pos < (nbInputs + nbOutputs)); - return ((inOut[pos].type == nvinfer1::DataType::kFLOAT) && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR - && inOut[pos].type == inOut[0].type); + try + { + PLUGIN_VALIDATE(inOut != nullptr); + PLUGIN_VALIDATE(pos < nbInputs + nbOutputs); + PLUGIN_VALIDATE(pos >= 0); + return ((inOut[pos].type == nvinfer1::DataType::kFLOAT) && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR + && inOut[pos].type == inOut[0].type); + } + catch (std::exception const& e) + { + caughtError(e); + return false; + } } -void GroupNormalizationPlugin::terminate() noexcept {} +void GroupNormalizationPlugin::terminate() noexcept +{ + mBnScales.reset(); + mBnBias.reset(); +} void GroupNormalizationPlugin::destroy() noexcept { @@ -213,10 +227,11 @@ IPluginV2DynamicExt* GroupNormalizationPlugin::clone() const noexcept try { auto* plugin = new GroupNormalizationPlugin(mEpsilon, mNbGroups); - plugin->setPluginNamespace(mPluginNamespace); + plugin->setPluginNamespace(mNamespace.c_str()); plugin->mNbScaleBias = mNbScaleBias; plugin->mBnScales = mBnScales; plugin->mBnBias = mBnBias; + plugin->mChannelVolume = mChannelVolume; return plugin; } catch (std::exception const& e) @@ -226,34 +241,109 @@ IPluginV2DynamicExt* GroupNormalizationPlugin::clone() const noexcept return nullptr; } -void GroupNormalizationPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept +void GroupNormalizationPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { - int32_t const batchSize = 
in[0].desc.dims.d[0] <= 0 ? in[0].max.d[0] : in[0].desc.dims.d[0]; - mNbScaleBias = batchSize * mNbGroups; + try + { + PLUGIN_VALIDATE(in != nullptr); + PLUGIN_VALIDATE(out != nullptr); + PLUGIN_VALIDATE(nbInputs == 3); + PLUGIN_VALIDATE(nbOutputs == getNbOutputs()); + + nvinfer1::Dims inputDims = in[0].desc.dims; + int32_t const batchSize = inputDims.d[0]; + int32_t const nbChannels = inputDims.d[1]; + + if (batchSize <= 0 || nbChannels <= 0) + { + // Input size not yet known, nothing to configure. + return; + } + + if (mTensorDesc == nullptr) + { + // Not yet attached to context. + return; + } + + // Allocate scale/bias tensors needed for cudnnBatchNorm. + mNbScaleBias = batchSize * mNbGroups; + auto allocScaleBias = [this](std::shared_ptr>& buf, float value) { + PLUGIN_VALIDATE(mNbScaleBias > 0); + if (!buf || !buf->mPtr || buf->mSize != mNbScaleBias) + { + // Allocate device memory. + buf = std::make_shared>(mNbScaleBias); + + // Initialize values. + std::vector const values(mNbScaleBias, value); + PLUGIN_CUASSERT( + cudaMemcpy(buf->mPtr, values.data(), sizeof(float) * mNbScaleBias, cudaMemcpyHostToDevice)); + } + }; + allocScaleBias(mBnScales, 1.F); + allocScaleBias(mBnBias, 0.F); + + // Calculate size of each group + int32_t groupSize = nbChannels / mNbGroups; + mChannelVolume = pluginInternal::volume(inputDims, /*start*/ 2, /*stop*/ inputDims.nbDims); + + // Set tensor descriptor in a way that cudnnBatchNorm will perform Group Normalization. + PLUGIN_CUDNNASSERT(cudnnSetTensor4dDescriptor(mTensorDesc, // descriptor + CUDNN_TENSOR_NCHW, // tensor format + CUDNN_DATA_FLOAT, // type + 1, // Batchsize + batchSize * mNbGroups, // Channels + groupSize, // Height + mChannelVolume // Width + )); + PLUGIN_CUDNNASSERT(cudnnDeriveBNTensorDescriptor(mBNTensorDesc, mTensorDesc, CUDNN_BATCHNORM_SPATIAL)); + } + catch (std::exception const& e) + { + caughtError(e); + } } nvinfer1::DataType GroupNormalizationPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { - PLUGIN_ASSERT(inputTypes && nbInputs > 0 && index == 0); - return inputTypes[0]; + try + { + PLUGIN_VALIDATE(inputTypes != nullptr); + PLUGIN_VALIDATE(index == 0); + return inputTypes[0]; + } + catch (std::exception const& e) + { + caughtError(e); + return DataType{}; + } } -size_t GroupNormalizationPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept +size_t GroupNormalizationPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } void GroupNormalizationPlugin::setPluginNamespace(char const* libNamespace) noexcept { - mPluginNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } char const* GroupNormalizationPlugin::getPluginNamespace() const noexcept { - return mPluginNamespace; + return mNamespace.c_str(); } GroupNormalizationPluginCreator::GroupNormalizationPluginCreator() @@ -288,7 +378,15 @@ char const* GroupNormalizationPluginCreator::getPluginNamespace() const noexcept void GroupNormalizationPluginCreator::setPluginNamespace(char const* libNamespace) noexcept { - mNamespace = libNamespace; + try + { + PLUGIN_VALIDATE(libNamespace != nullptr); + 
mNamespace = libNamespace; + } + catch (std::exception const& e) + { + caughtError(e); + } } IPluginV2DynamicExt* GroupNormalizationPluginCreator::createPlugin( @@ -296,19 +394,22 @@ IPluginV2DynamicExt* GroupNormalizationPluginCreator::createPlugin( { try { + PLUGIN_VALIDATE(fc != nullptr); + // Set default values - int nbGroups{1}; + int32_t nbGroups{1}; float epsilon{0.00001F}; - for (int i = 0; i < fc->nbFields; i++) + for (int32_t i = 0; i < fc->nbFields; i++) { - std::string field_name(fc->fields[i].name); - if (field_name.compare("eps") == 0) + PLUGIN_VALIDATE(fc->fields[i].name != nullptr); + std::string fieldName(fc->fields[i].name); + if (fieldName.compare("eps") == 0) { epsilon = *static_cast(fc->fields[i].data); } - if (field_name.compare("num_groups") == 0) + if (fieldName.compare("num_groups") == 0) { - nbGroups = *static_cast(fc->fields[i].data); + nbGroups = *static_cast(fc->fields[i].data); } } diff --git a/plugin/groupNormalizationPlugin/groupNormalizationPlugin.h b/plugin/groupNormalizationPlugin/groupNormalizationPlugin.h index 23d3e40d..001720aa 100644 --- a/plugin/groupNormalizationPlugin/groupNormalizationPlugin.h +++ b/plugin/groupNormalizationPlugin/groupNormalizationPlugin.h @@ -19,9 +19,8 @@ #define TRT_GROUP_NORM_PLUGIN_H #include "common/plugin.h" -#include "common/serialize.hpp" + #include -#include #include #include @@ -34,13 +33,13 @@ namespace plugin { template -cudaError_t scaleShiftChannelsInplace(T* inOut, int const B, int const C, int const channelVolume, float const* beta, - float const* gamma, cudaStream_t stream); +cudaError_t scaleShiftChannelsInplace(T* inOut, int32_t const B, int32_t const C, int32_t const channelVolume, + float const* beta, float const* gamma, cudaStream_t stream); class GroupNormalizationPlugin final : public nvinfer1::IPluginV2DynamicExt { public: - GroupNormalizationPlugin(float epsilon, int const nbGroups); + GroupNormalizationPlugin(float epsilon, int32_t const nbGroups); GroupNormalizationPlugin(void const* data, size_t length); @@ -48,20 +47,20 @@ class GroupNormalizationPlugin final : public nvinfer1::IPluginV2DynamicExt // delete default constructor. 
GroupNormalizationPlugin() = delete; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; // DynamicExt plugins returns DimsExprs class instead of Dims - DimsExprs getOutputDimensions(int index, nvinfer1::DimsExprs const* inputs, int nbInputDims, + DimsExprs getOutputDimensions(int32_t index, nvinfer1::DimsExprs const* inputs, int32_t nbInputDims, nvinfer1::IExprBuilder& exprBuilder) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept override; + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; - int enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -69,7 +68,7 @@ class GroupNormalizationPlugin final : public nvinfer1::IPluginV2DynamicExt void serialize(void* buffer) const noexcept override; bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; char const* getPluginType() const noexcept override; @@ -79,7 +78,8 @@ class GroupNormalizationPlugin final : public nvinfer1::IPluginV2DynamicExt void destroy() noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; void attachToContext( cudnnContext* cudnn, cublasContext* cublas, nvinfer1::IGpuAllocator* allocator) noexcept override; @@ -90,21 +90,22 @@ class GroupNormalizationPlugin final : public nvinfer1::IPluginV2DynamicExt char const* getPluginNamespace() const noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; private: - char const* mPluginNamespace; std::string mNamespace; float mEpsilon; - int mNbGroups; - int mChannelVolume; + int32_t mNbGroups; + int32_t mChannelVolume; + + cudnnHandle_t mCudnnHandle{}; - cudnnHandle_t _cudnn_handle; // Describes input and output. 
- cudnnTensorDescriptor_t desc; - cudnnTensorDescriptor_t bnDesc; + cudnnTensorDescriptor_t mTensorDesc{}; + cudnnTensorDescriptor_t mBNTensorDesc{}; + // These are buffers initialized to 1 and 0 respectively std::shared_ptr> mBnScales{}; std::shared_ptr> mBnBias{}; diff --git a/plugin/instanceNormalizationPlugin/instanceNormalizationPlugin.h b/plugin/instanceNormalizationPlugin/instanceNormalizationPlugin.h index d736f64c..f00a083c 100644 --- a/plugin/instanceNormalizationPlugin/instanceNormalizationPlugin.h +++ b/plugin/instanceNormalizationPlugin/instanceNormalizationPlugin.h @@ -25,7 +25,7 @@ #include #include -typedef unsigned short half_type; +typedef uint16_t half_type; namespace nvinfer1 { diff --git a/plugin/leakyReluPlugin/lReluPlugin.cpp b/plugin/leakyReluPlugin/lReluPlugin.cpp index 84346bc0..0fe1af5b 100644 --- a/plugin/leakyReluPlugin/lReluPlugin.cpp +++ b/plugin/leakyReluPlugin/lReluPlugin.cpp @@ -39,24 +39,24 @@ LReLU::LReLU(void const* buffer, size_t length) { char const *d = reinterpret_cast(buffer), *a = d; mNegSlope = read(d); - mBatchDim = read(d); + mBatchDim = read(d); PLUGIN_VALIDATE(d == a + length); } -int LReLU::getNbOutputs() const noexcept +int32_t LReLU::getNbOutputs() const noexcept { return 1; } -Dims LReLU::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims LReLU::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(nbInputDims == 1); PLUGIN_ASSERT(index == 0); return inputs[0]; } -int LReLU::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t LReLU::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { void const* inputData = inputs[0]; void* outputData = outputs[0]; @@ -67,7 +67,7 @@ int LReLU::enqueue( size_t LReLU::getSerializationSize() const noexcept { // mNegSlope, mBatchDim - return sizeof(float) + sizeof(int); + return sizeof(float) + sizeof(int32_t); } void LReLU::serialize(void* buffer) const noexcept @@ -78,13 +78,13 @@ void LReLU::serialize(void* buffer) const noexcept PLUGIN_ASSERT(d == a + getSerializationSize()); } -void LReLU::configureWithFormat(Dims const* inputDims, int /* nbInputs */, Dims const* /* outputDims */, int nbOutputs, - DataType type, PluginFormat format, int) noexcept +void LReLU::configureWithFormat(Dims const* inputDims, int32_t /* nbInputs */, Dims const* /* outputDims */, + int32_t nbOutputs, DataType type, PluginFormat format, int32_t) noexcept { PLUGIN_ASSERT(type == DataType::kFLOAT && format == PluginFormat::kLINEAR); PLUGIN_ASSERT(mBatchDim == 1); PLUGIN_ASSERT(nbOutputs == 1); - for (int i = 0; i < inputDims[0].nbDims; ++i) + for (int32_t i = 0; i < inputDims[0].nbDims; ++i) { mBatchDim *= inputDims[0].d[i]; } @@ -95,14 +95,14 @@ bool LReLU::supportsFormat(DataType type, PluginFormat format) const noexcept return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } -int LReLU::initialize() noexcept +int32_t LReLU::initialize() noexcept { return 0; } void LReLU::terminate() noexcept {} -size_t LReLU::getWorkspaceSize(int /* maxBatchSize */) const noexcept +size_t LReLU::getWorkspaceSize(int32_t /* maxBatchSize */) const noexcept { return 0; } diff --git a/plugin/leakyReluPlugin/lReluPlugin.h b/plugin/leakyReluPlugin/lReluPlugin.h index c4b68b1d..39c973b5 100644 --- a/plugin/leakyReluPlugin/lReluPlugin.h +++ b/plugin/leakyReluPlugin/lReluPlugin.h @@ -37,25 +37,25 @@ class 
LReLU : public nvinfer1::pluginInternal::BasePlugin ~LReLU() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; - void configureWithFormat(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, DataType type, - PluginFormat format, int maxBatchSize) noexcept override; + void configureWithFormat(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, + DataType type, PluginFormat format, int32_t maxBatchSize) noexcept override; bool supportsFormat(DataType type, PluginFormat format) const noexcept override; @@ -69,7 +69,7 @@ class LReLU : public nvinfer1::pluginInternal::BasePlugin private: float mNegSlope; - int mBatchDim; + int32_t mBatchDim; }; class LReluPluginCreator : public nvinfer1::pluginInternal::BaseCreator diff --git a/plugin/modulatedDeformConvPlugin/modulatedDeformConvPlugin.h b/plugin/modulatedDeformConvPlugin/modulatedDeformConvPlugin.h index 3e2d5687..53b653fa 100644 --- a/plugin/modulatedDeformConvPlugin/modulatedDeformConvPlugin.h +++ b/plugin/modulatedDeformConvPlugin/modulatedDeformConvPlugin.h @@ -55,7 +55,7 @@ class ModulatedDeformableConvPluginDynamic : public nvinfer1::IPluginV2DynamicEx ModulatedDeformableConvPluginDynamic() = delete; - ~ModulatedDeformableConvPluginDynamic(); + ~ModulatedDeformableConvPluginDynamic() override; nvinfer1::IPluginV2DynamicExt* clone() const noexcept override; nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, diff --git a/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.cpp b/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.cpp index 711d2fd8..3dc980b8 100644 --- a/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.cpp +++ b/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.cpp @@ -108,7 +108,7 @@ IPluginV2Ext* MultilevelCropAndResizePluginCreator::deserializePlugin( return nullptr; } -MultilevelCropAndResize::MultilevelCropAndResize(int pooled_size, nvinfer1::Dims const& imageSize) +MultilevelCropAndResize::MultilevelCropAndResize(int32_t pooled_size, nvinfer1::Dims const& imageSize) : mPooledSize({pooled_size, pooled_size}) { @@ -122,12 +122,12 @@ MultilevelCropAndResize::MultilevelCropAndResize(int pooled_size, nvinfer1::Dims mThresh = (224 * 224) / (4.0F); } -int MultilevelCropAndResize::getNbOutputs() const noexcept +int32_t MultilevelCropAndResize::getNbOutputs() const noexcept { return 1; } -int MultilevelCropAndResize::initialize() noexcept +int32_t MultilevelCropAndResize::initialize() noexcept { return 0; } @@ -139,7 +139,7 @@ void MultilevelCropAndResize::destroy() noexcept delete this; } -size_t 
MultilevelCropAndResize::getWorkspaceSize(int) const noexcept +size_t MultilevelCropAndResize::getWorkspaceSize(int32_t) const noexcept { return 0; } @@ -182,7 +182,7 @@ char const* MultilevelCropAndResize::getPluginNamespace() const noexcept return mNameSpace.c_str(); } -void MultilevelCropAndResize::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept +void MultilevelCropAndResize::check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept { // to be compatible with tensorflow node's input: // roi: [N, anchors, 4], @@ -193,7 +193,7 @@ void MultilevelCropAndResize::check_valid_inputs(nvinfer1::Dims const* inputs, i PLUGIN_ASSERT(rois.nbDims == 2); PLUGIN_ASSERT(rois.d[1] == 4); - for (int i = 1; i < nbInputDims; ++i) + for (int32_t i = 1; i < nbInputDims; ++i) { nvinfer1::Dims dims = inputs[i]; @@ -202,7 +202,7 @@ void MultilevelCropAndResize::check_valid_inputs(nvinfer1::Dims const* inputs, i } } -Dims MultilevelCropAndResize::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims MultilevelCropAndResize::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { check_valid_inputs(inputs, nbInputDims); @@ -241,7 +241,8 @@ int32_t MultilevelCropAndResize::enqueue( size_t MultilevelCropAndResize::getSerializationSize() const noexcept { - return sizeof(int) * 2 + sizeof(int) * 4 + sizeof(float) + sizeof(int) * 2 * mFeatureMapCount + sizeof(DataType); + return sizeof(int32_t) * 2 + sizeof(int32_t) * 4 + sizeof(float) + sizeof(int32_t) * 2 * mFeatureMapCount + + sizeof(DataType); } void MultilevelCropAndResize::serialize(void* buffer) const noexcept @@ -254,7 +255,7 @@ void MultilevelCropAndResize::serialize(void* buffer) const noexcept write(d, mInputHeight); write(d, mInputWidth); write(d, mThresh); - for (int i = 0; i < mFeatureMapCount; i++) + for (int32_t i = 0; i < mFeatureMapCount; i++) { write(d, mFeatureSpatialSize[i].y); write(d, mFeatureSpatialSize[i].x); @@ -276,7 +277,7 @@ void MultilevelCropAndResize::deserialize(int8_t const* data, size_t length) mInputHeight = read(d); mInputWidth = read(d); mThresh = read(d); - for (int i = 0; i < mFeatureMapCount; i++) + for (int32_t i = 0; i < mFeatureMapCount; i++) { mFeatureSpatialSize[i].y = read(d); mFeatureSpatialSize[i].x = read(d); @@ -287,7 +288,7 @@ void MultilevelCropAndResize::deserialize(int8_t const* data, size_t length) // Return the DataType of the plugin output at the requested index DataType MultilevelCropAndResize::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only DataType::kFLOAT is acceptable by the plugin layer // return DataType::kFLOAT; @@ -300,21 +301,21 @@ DataType MultilevelCropAndResize::getOutputDataType( // Return true if output tensor is broadcast across a batch. bool MultilevelCropAndResize::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool MultilevelCropAndResize::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool MultilevelCropAndResize::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. 
-void MultilevelCropAndResize::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, - int nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept +void MultilevelCropAndResize::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(supportsFormat(inputTypes[0], floatFormat)); check_valid_inputs(inputDims, nbInputs); diff --git a/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.h b/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.h index d67802f3..c2f615b4 100644 --- a/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.h +++ b/plugin/multilevelCropAndResizePlugin/multilevelCropAndResizePlugin.h @@ -36,23 +36,23 @@ namespace plugin class MultilevelCropAndResize : public IPluginV2Ext { public: - MultilevelCropAndResize(int pooled_size, nvinfer1::Dims const& image_size); + MultilevelCropAndResize(int32_t pooled_size, nvinfer1::Dims const& image_size); MultilevelCropAndResize(void const* data, size_t length); ~MultilevelCropAndResize() noexcept override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; void destroy() noexcept override; - size_t getWorkspaceSize(int) const noexcept override; + size_t getWorkspaceSize(int32_t) const noexcept override; int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; @@ -73,25 +73,26 @@ class MultilevelCropAndResize : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: void 
deserialize(int8_t const* data, size_t length); - void check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept; + void check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept; xy_t mPooledSize{}; static const int32_t mFeatureMapCount = 5; // p2, p3, p4, p5, p6(Maxpooling) @@ -124,7 +125,7 @@ class MultilevelCropAndResizePluginCreator : public nvinfer1::pluginInternal::Ba private: static PluginFieldCollection mFC; - int mPooledSize; + int32_t mPooledSize; static std::vector mPluginAttributes; }; } // namespace plugin diff --git a/plugin/multilevelProposeROI/multilevelProposeROIPlugin.cpp b/plugin/multilevelProposeROI/multilevelProposeROIPlugin.cpp index 8fb0e426..d9ad8add 100644 --- a/plugin/multilevelProposeROI/multilevelProposeROIPlugin.cpp +++ b/plugin/multilevelProposeROI/multilevelProposeROIPlugin.cpp @@ -129,7 +129,7 @@ IPluginV2Ext* MultilevelProposeROIPluginCreator::deserializePlugin( } MultilevelProposeROI::MultilevelProposeROI( - int prenms_topk, int keep_topk, float fg_threshold, float iou_threshold, const nvinfer1::Dims imageSize) + int32_t prenms_topk, int32_t keep_topk, float fg_threshold, float iou_threshold, const nvinfer1::Dims imageSize) : mPreNMSTopK(prenms_topk) , mKeepTopK(keep_topk) , mFGThreshold(fg_threshold) @@ -137,7 +137,8 @@ MultilevelProposeROI::MultilevelProposeROI( , mImageSize(imageSize) { mBackgroundLabel = -1; - PLUGIN_VALIDATE(mPreNMSTopK > 0 && mPreNMSTopK <= 4096); + PLUGIN_VALIDATE(mPreNMSTopK > 0); + PLUGIN_VALIDATE(mPreNMSTopK <= 4096); PLUGIN_VALIDATE(mKeepTopK > 0); PLUGIN_VALIDATE(mIOUThreshold >= 0.0F); PLUGIN_VALIDATE(mFGThreshold >= 0.0F); @@ -157,12 +158,12 @@ MultilevelProposeROI::MultilevelProposeROI( generate_pyramid_anchors(mImageSize); } -int MultilevelProposeROI::getNbOutputs() const noexcept +int32_t MultilevelProposeROI::getNbOutputs() const noexcept { return 1; } -int MultilevelProposeROI::initialize() noexcept +int32_t MultilevelProposeROI::initialize() noexcept { // Init the regWeight [1, 1, 1, 1] mRegWeightDevice = std::make_shared>(4); @@ -171,22 +172,22 @@ int MultilevelProposeROI::initialize() noexcept sizeof(float) * 4, cudaMemcpyHostToDevice)); // Init the mValidCnt of max batch size - std::vector tempValidCnt(mMaxBatchSize, mPreNMSTopK); + std::vector tempValidCnt(mMaxBatchSize, mPreNMSTopK); - mValidCnt = std::make_shared>(mMaxBatchSize); + mValidCnt = std::make_shared>(mMaxBatchSize); - PLUGIN_CUASSERT(cudaMemcpy( - mValidCnt->mPtr, static_cast(tempValidCnt.data()), sizeof(int) * mMaxBatchSize, cudaMemcpyHostToDevice)); + PLUGIN_CUASSERT(cudaMemcpy(mValidCnt->mPtr, static_cast(tempValidCnt.data()), + sizeof(int32_t) * mMaxBatchSize, cudaMemcpyHostToDevice)); // Init the anchors for batch size: - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { - int i_anchors_cnt = mAnchorsCnt[i]; + int32_t i_anchors_cnt = mAnchorsCnt[i]; auto i_anchors_host = mAnchorBoxesHost[i].data(); auto i_anchors_device = std::make_shared>(i_anchors_cnt * 4 * mMaxBatchSize); - int batch_offset = sizeof(float) * i_anchors_cnt * 4; + int32_t batch_offset = sizeof(float) * i_anchors_cnt * 4; uint8_t* device_ptr = static_cast(i_anchors_device->mPtr); - for (int i = 0; i < mMaxBatchSize; i++) + for (int32_t i = 0; i < mMaxBatchSize; i++) { PLUGIN_CUASSERT(cudaMemcpy(static_cast(device_ptr + i * batch_offset), static_cast(i_anchors_host), batch_offset, cudaMemcpyHostToDevice)); @@ -197,7 +198,7 @@ int MultilevelProposeROI::initialize() noexcept // Init the temp storage for proposals 
from feature maps before concat std::vector score_tp; std::vector box_tp; - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { if (mType == DataType::kFLOAT) { @@ -276,7 +277,7 @@ char const* MultilevelProposeROI::getPluginNamespace() const noexcept size_t MultilevelProposeROI::getSerializationSize() const noexcept { - return sizeof(int) * 2 + sizeof(float) * 2 + sizeof(int) * (mFeatureCnt + 1) + sizeof(nvinfer1::Dims) + return sizeof(int32_t) * 2 + sizeof(float) * 2 + sizeof(int32_t) * (mFeatureCnt + 1) + sizeof(nvinfer1::Dims) + sizeof(DataType); } @@ -288,7 +289,7 @@ void MultilevelProposeROI::serialize(void* buffer) const noexcept write(d, mFGThreshold); write(d, mIOUThreshold); write(d, mMaxBatchSize); - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { write(d, mAnchorsCnt[i]); } @@ -302,15 +303,15 @@ MultilevelProposeROI::MultilevelProposeROI(void const* data, size_t length) mFeatureCnt = TLTMaskRCNNConfig::MAX_LEVEL - TLTMaskRCNNConfig::MIN_LEVEL + 1; char const *d = reinterpret_cast(data), *a = d; - int prenms_topk = read(d); - int keep_topk = read(d); + int32_t prenms_topk = read(d); + int32_t keep_topk = read(d); float fg_threshold = read(d); float iou_threshold = read(d); - mMaxBatchSize = read(d); + mMaxBatchSize = read(d); PLUGIN_VALIDATE(mAnchorsCnt.size() == 0); - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { - mAnchorsCnt.push_back(read(d)); + mAnchorsCnt.push_back(read(d)); } mImageSize = read(d); mType = read(d); @@ -331,14 +332,14 @@ MultilevelProposeROI::MultilevelProposeROI(void const* data, size_t length) generate_pyramid_anchors(mImageSize); } -void MultilevelProposeROI::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept +void MultilevelProposeROI::check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept { // x=2,3,4,5,6 // foreground_delta_px [N, h_x * w_x * anchors_per_location, 4, 1], // foreground_score_px [N, h_x * w_x * anchors_per_location, 1, 1], // anchors should be generated inside PLUGIN_ASSERT(nbInputDims == 2 * mFeatureCnt); - for (int i = 0; i < 2 * mFeatureCnt; i += 2) + for (int32_t i = 0; i < 2 * mFeatureCnt; i += 2) { // foreground_delta PLUGIN_ASSERT(inputs[i].nbDims == 3 && inputs[i].d[1] == 4); @@ -347,13 +348,13 @@ void MultilevelProposeROI::check_valid_inputs(nvinfer1::Dims const* inputs, int } } -size_t MultilevelProposeROI::getWorkspaceSize(int batch_size) const noexcept +size_t MultilevelProposeROI::getWorkspaceSize(int32_t batch_size) const noexcept { size_t total_size = 0; PLUGIN_ASSERT(mAnchorsCnt.size() == static_cast(mFeatureCnt)); // workspace for propose on each feature map - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { MultilevelProposeROIWorkSpace proposal(batch_size, mAnchorsCnt[i], mPreNMSTopK, mParam, mType); @@ -367,7 +368,7 @@ size_t MultilevelProposeROI::getWorkspaceSize(int batch_size) const noexcept return total_size; } -Dims MultilevelProposeROI::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims MultilevelProposeROI::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { check_valid_inputs(inputs, nbInputDims); @@ -387,10 +388,10 @@ void MultilevelProposeROI::generate_pyramid_anchors(nvinfer1::Dims const& imageS // Generate anchors strides and scales std::vector anchor_scales; - std::vector anchor_strides; - for (int i = min_level; i < max_level + 1; i++) + std::vector 
anchor_strides; + for (int32_t i = min_level; i < max_level + 1; i++) { - int stride = static_cast(pow(2.0, i)); + int32_t stride = static_cast(pow(2.0, i)); anchor_strides.push_back(stride); anchor_scales.push_back(stride * anchor_scale); } @@ -402,11 +403,11 @@ void MultilevelProposeROI::generate_pyramid_anchors(nvinfer1::Dims const& imageS for (size_t s = 0; s < anchor_scales.size(); ++s) { float scale = anchor_scales[s]; - int stride = anchor_strides[s]; + int32_t stride = anchor_strides[s]; std::vector s_anchors; - for (int y = stride / 2; y < image_dims.d[1]; y += stride) - for (int x = stride / 2; x < image_dims.d[2]; x += stride) + for (int32_t y = stride / 2; y < image_dims.d[1]; y += stride) + for (int32_t x = stride / 2; x < image_dims.d[2]; x += stride) for (auto r : aspect_ratios) { float h = scale * r.second; @@ -433,7 +434,7 @@ int32_t MultilevelProposeROI::enqueue( std::vector mTempScores; std::vector mTempBboxes; - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { if (mType == DataType::kFLOAT) { @@ -447,7 +448,7 @@ int32_t MultilevelProposeROI::enqueue( } } - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { MultilevelProposeROIWorkSpace proposal_ws(batch_size, mAnchorsCnt[i], mPreNMSTopK, mParam, mType); status = MultilevelPropose(stream, batch_size, mAnchorsCnt[i], mPreNMSTopK, @@ -477,7 +478,7 @@ int32_t MultilevelProposeROI::enqueue( // Return the DataType of the plugin output at the requested index DataType MultilevelProposeROI::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only DataType::kFLOAT is acceptable by the plugin layer if ((inputTypes[0] == DataType::kFLOAT) || (inputTypes[0] == DataType::kHALF)) @@ -487,29 +488,29 @@ DataType MultilevelProposeROI::getOutputDataType( // Return true if output tensor is broadcast across a batch. bool MultilevelProposeROI::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool MultilevelProposeROI::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool MultilevelProposeROI::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. 
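For reference (not part of the diff): generate_pyramid_anchors() above derives one stride and one anchor scale per FPN level, with stride = 2^level and scale = stride * anchor_scale. A small standalone sketch of that mapping, assuming the defaults from tlt_mrcnn_config.h (MIN_LEVEL = 2, MAX_LEVEL = 6, RPN_ANCHOR_SCALE = 8):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    int32_t const minLevel = 2;   // p2
    int32_t const maxLevel = 6;   // p6
    float const anchorScale = 8.F;

    std::vector<int32_t> anchorStrides;
    std::vector<float> anchorScales;
    for (int32_t i = minLevel; i <= maxLevel; i++)
    {
        int32_t const stride = static_cast<int32_t>(std::pow(2.0, i));
        anchorStrides.push_back(stride);              // 4, 8, 16, 32, 64
        anchorScales.push_back(stride * anchorScale); // 32, 64, 128, 256, 512
    }

    // Anchor centers are then placed every `stride` pixels across the image,
    // one box per aspect ratio, as in the nested y/x loops above.
    for (size_t s = 0; s < anchorStrides.size(); ++s)
    {
        std::printf("p%zu: stride=%d scale=%.0f\n", s + minLevel, anchorStrides[s], anchorScales[s]);
    }
    return 0;
}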
-void MultilevelProposeROI::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, - DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept +void MultilevelProposeROI::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { check_valid_inputs(inputDims, nbInputs); mAnchorsCnt.clear(); - for (int i = 0; i < mFeatureCnt; i++) + for (int32_t i = 0; i < mFeatureCnt; i++) { mAnchorsCnt.push_back(inputDims[2 * i].d[0]); - PLUGIN_ASSERT(mAnchorsCnt[i] == (int) (mAnchorBoxesHost[i].size() / 4)); + PLUGIN_ASSERT(mAnchorsCnt[i] == (int32_t) (mAnchorBoxesHost[i].size() / 4)); } mMaxBatchSize = maxBatchSize; diff --git a/plugin/multilevelProposeROI/multilevelProposeROIPlugin.h b/plugin/multilevelProposeROI/multilevelProposeROIPlugin.h index 31277758..653958e6 100644 --- a/plugin/multilevelProposeROI/multilevelProposeROIPlugin.h +++ b/plugin/multilevelProposeROI/multilevelProposeROIPlugin.h @@ -35,24 +35,24 @@ namespace plugin class MultilevelProposeROI : public IPluginV2Ext { public: - MultilevelProposeROI( - int prenms_topk, int keep_topk, float fg_threshold, float iou_threshold, const nvinfer1::Dims image_size); + MultilevelProposeROI(int32_t prenms_topk, int32_t keep_topk, float fg_threshold, float iou_threshold, + const nvinfer1::Dims image_size); MultilevelProposeROI(void const* data, size_t length); ~MultilevelProposeROI() noexcept override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; void destroy() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; @@ -73,36 +73,37 @@ class MultilevelProposeROI : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t 
nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: - void check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) noexcept; + void check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) noexcept; void generate_pyramid_anchors(nvinfer1::Dims const& imageSize); - int mBackgroundLabel; - int mPreNMSTopK; - int mKeepTopK; - int mFeatureCnt; + int32_t mBackgroundLabel; + int32_t mPreNMSTopK; + int32_t mKeepTopK; + int32_t mFeatureCnt; float mFGThreshold; float mIOUThreshold; - int mMaxBatchSize; - std::vector mAnchorsCnt; - std::shared_ptr> mValidCnt; // valid cnt = number of input roi for every image. + int32_t mMaxBatchSize; + std::vector mAnchorsCnt; + std::shared_ptr> mValidCnt; // valid cnt = number of input roi for every image. std::vector>> mAnchorBoxesDevice; // [N, anchors(261888 for resnet101 + 1024*1024), (y1, x1, y2, x2)] std::vector> mAnchorBoxesHost; @@ -140,8 +141,8 @@ class MultilevelProposeROIPluginCreator : public nvinfer1::pluginInternal::BaseC private: static PluginFieldCollection mFC; - int mPreNMSTopK; - int mKeepTopK; + int32_t mPreNMSTopK; + int32_t mKeepTopK; float mFGThreshold; float mIOUThreshold; static std::vector mPluginAttributes; diff --git a/plugin/multilevelProposeROI/tlt_mrcnn_config.h b/plugin/multilevelProposeROI/tlt_mrcnn_config.h index c3f6b345..13c8abfe 100644 --- a/plugin/multilevelProposeROI/tlt_mrcnn_config.h +++ b/plugin/multilevelProposeROI/tlt_mrcnn_config.h @@ -27,8 +27,8 @@ namespace TLTMaskRCNNConfig static const nvinfer1::Dims3 IMAGE_SHAPE{3, 832, 1344}; // Pooled ROIs -static int const POOL_SIZE = 7; -static int const MASK_POOL_SIZE = 14; +static int32_t const POOL_SIZE = 7; +static int32_t const MASK_POOL_SIZE = 14; // Threshold to determine the mask area out of final convolution output static float const MASK_THRESHOLD = 0.5; @@ -37,7 +37,7 @@ static float const MASK_THRESHOLD = 0.5; static float const DETECTION_REG_WEIGHTS[] = {10, 10, 5, 5}; // Max number of final detections -static int const DETECTION_MAX_INSTANCES = 100; +static int32_t const DETECTION_MAX_INSTANCES = 100; // Minimum probability value to accept a detected instance // ROIs below this threshold are skipped @@ -47,18 +47,18 @@ static float const DETECTION_MIN_CONFIDENCE = 0; static float const DETECTION_NMS_THRESHOLD = 0.5; // Size of the fully-connected layers in the classification graph -static int const FPN_CLASSIF_FC_LAYERS_SIZE = 1024; +static int32_t const FPN_CLASSIF_FC_LAYERS_SIZE = 1024; // Size of the top-down layers used to build the feature pyramid -static int const TOP_DOWN_PYRAMID_SIZE = 256; +static int32_t const TOP_DOWN_PYRAMID_SIZE = 256; // Number of classification classes (including background) -static int const NUM_CLASSES = 1 + 90; +static int32_t const NUM_CLASSES = 1 + 90; // Min and max level of fpn feature pyramids: // p2, p3, p4, p5, p6. -static int const MIN_LEVEL = 2; -static int const MAX_LEVEL = 6; +static int32_t const MIN_LEVEL = 2; +static int32_t const MAX_LEVEL = 6; // Length of minimum square anchor side in pixels static float const RPN_ANCHOR_SCALE = 8; @@ -70,17 +70,17 @@ static const std::vector> ANCHOR_RATIOS // Anchor stride // If 1 then anchors are created for each cell in the backbone feature map. 
// If 2, then anchors are created for every other cell, and so on. -static int const RPN_ANCHOR_STRIDE = 1; +static int32_t const RPN_ANCHOR_STRIDE = 1; // TRT fails if this number larger than kMAX_TOPK_K defined in engine/checkMacros.h -static int const MAX_PRE_NMS_RESULTS = 1000; // 3840; +static int32_t const MAX_PRE_NMS_RESULTS = 1000; // 3840; // Non-max suppression threshold to filter RPN proposals. // You can increase this during training to generate more propsals. static float const RPN_NMS_THRESHOLD = 0.7F; // ROIs kept after non-maximum suppression (training and inference) -static int const POST_NMS_ROIS_INFERENCE = 1000; +static int32_t const POST_NMS_ROIS_INFERENCE = 1000; // COCO Class names static const std::vector CLASS_NAMES = { diff --git a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttn.cu b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttn.cu index 8b923d2f..d6843c64 100644 --- a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttn.cu +++ b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttn.cu @@ -36,8 +36,8 @@ #include "multiscaleDeformableIm2ColCuda.cuh" -int32_t ms_deform_attn_cuda_forward(cudaStream_t stream, const float* value, const int32_t* spatialShapes, - const int32_t* levelStartIndex, const float* samplingLoc, const float* attnWeight, float* output, int32_t batch, +int32_t ms_deform_attn_cuda_forward(cudaStream_t stream, float const* value, int32_t const* spatialShapes, + int32_t const* levelStartIndex, float const* samplingLoc, float const* attnWeight, float* output, int32_t batch, int32_t mSpatialSize, int32_t mNumHeads, int32_t mChannels, int32_t mNumLevels, int32_t mNumQuery, int32_t mNumPoint) { auto perValueSize = mSpatialSize * mNumHeads * mChannels; @@ -57,8 +57,8 @@ int32_t ms_deform_attn_cuda_forward(cudaStream_t stream, const float* value, con return 0; } -int32_t ms_deform_attn_cuda_forward(cudaStream_t stream, const __half* value, const int32_t* spatialShapes, - const int32_t* levelStartIndex, const __half* samplingLoc, const __half* attnWeight, __half* output, int32_t batch, +int32_t ms_deform_attn_cuda_forward(cudaStream_t stream, __half const* value, int32_t const* spatialShapes, + int32_t const* levelStartIndex, __half const* samplingLoc, __half const* attnWeight, __half* output, int32_t batch, int32_t mSpatialSize, int32_t mNumHeads, int32_t mChannels, int32_t mNumLevels, int32_t mNumQuery, int32_t mNumPoint) { auto perValueSize = mSpatialSize * mNumHeads * mChannels; diff --git a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttnPlugin.cpp b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttnPlugin.cpp index 81f7afd3..939590ae 100644 --- a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttnPlugin.cpp +++ b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableAttnPlugin.cpp @@ -44,7 +44,7 @@ nvinfer1::IPluginV2DynamicExt* MultiscaleDeformableAttnPlugin::clone() const PLU plugin->setPluginNamespace(getPluginNamespace()); return plugin; } - catch (const std::exception& e) + catch (std::exception const& e) { caughtError(e); } @@ -140,11 +140,11 @@ int32_t MultiscaleDeformableAttnPlugin::enqueue(nvinfer1::PluginTensorDesc const } else if (inputDesc[0].type == nvinfer1::DataType::kHALF) { - const __half* value = static_cast(inputs[0]); + __half const* value = static_cast<__half const*>(inputs[0]); int32_t const* spatialShapes = static_cast(inputs[1]); int32_t const* levelStartIndex = static_cast(inputs[2]); - const __half* samplingLoc = static_cast(inputs[3]); 
- const __half* attnWeight = static_cast(inputs[4]); + __half const* samplingLoc = static_cast<__half const*>(inputs[3]); + __half const* attnWeight = static_cast<__half const*>(inputs[4]); __half* output = static_cast<__half*>(outputs[0]); rc = ms_deform_attn_cuda_forward(stream, value, spatialShapes, levelStartIndex, samplingLoc, attnWeight, output, @@ -244,7 +244,7 @@ IPluginV2* MultiscaleDeformableAttnPluginCreator::createPlugin( MultiscaleDeformableAttnPlugin* plugin = new MultiscaleDeformableAttnPlugin(); return plugin; } - catch (const std::exception& e) + catch (std::exception const& e) { caughtError(e); } @@ -260,7 +260,7 @@ IPluginV2* MultiscaleDeformableAttnPluginCreator::deserializePlugin( plugin->setPluginNamespace(getPluginNamespace()); return plugin; } - catch (const std::exception& e) + catch (std::exception const& e) { caughtError(e); } diff --git a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableIm2ColCuda.cuh b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableIm2ColCuda.cuh index eaa670fd..370c4cd1 100644 --- a/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableIm2ColCuda.cuh +++ b/plugin/multiscaleDeformableAttnPlugin/multiscaleDeformableIm2ColCuda.cuh @@ -39,7 +39,7 @@ #define CUDA_KERNEL_LOOP(i, n) for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) -constexpr int32_t CUDA_NUM_THREADS = 768; +constexpr int32_t kCUDA_NUM_THREADS{768}; inline int32_t GET_BLOCKS(int32_t const N, int32_t const numThreads) { return (N + numThreads - 1) / numThreads; @@ -98,26 +98,26 @@ __device__ scalar_t ms_deform_attn_im2col_bilinear(scalar_t const*& bottomData, } template <> -__device__ __half ms_deform_attn_im2col_bilinear<__half>(const __half*& bottomData, int32_t const& height, int32_t const& width, - int32_t const& nHeads, int32_t const& channels, const __half& h, const __half& w, int32_t const& m, int32_t const& c) +__device__ __half ms_deform_attn_im2col_bilinear<__half>(__half const*& bottomData, int32_t const& height, int32_t const& width, + int32_t const& nHeads, int32_t const& channels, __half const& h, __half const& w, int32_t const& m, int32_t const& c) { int32_t const hLow = __half2int_rd(h); int32_t const wLow = __half2int_rd(w); int32_t const hHigh = hLow + 1; int32_t const wHigh = wLow + 1; - const __half kZERO = __int2half_rz(0); - const __half one = __int2half_rz(1); + __half const kZERO = __int2half_rz(0); + __half const one = __int2half_rz(1); #if __CUDA_ARCH__>=530 - const __half lh = __hsub(h, __int2half_rd(hLow)); - const __half lw = __hsub(w, __int2half_rd(wLow)); - const __half hh = __hsub(one, lh), hw = __hsub(one, lw); + __half const lh = __hsub(h, __int2half_rd(hLow)); + __half const lw = __hsub(w, __int2half_rd(wLow)); + __half const hh = __hsub(one, lh), hw = __hsub(one, lw); #else - const __half lh = __float2half(__half2float(h) - hLow); - const __half lw = __float2half(__half2float(w) - wLow); - const __half hh = __float2half(__half2float(one) - __half2float(lh)); - const __half hw = __float2half(__half2float(one) - __half2float(lw)); + __half const lh = __float2half(__half2float(h) - hLow); + __half const lw = __float2half(__half2float(w) - wLow); + __half const hh = __float2half(__half2float(one) - __half2float(lh)); + __half const hw = __float2half(__half2float(one) - __half2float(lw)); #endif int32_t const wStride = nHeads * channels; int32_t const hStride = width * wStride; @@ -161,7 +161,7 @@ __device__ __half ms_deform_attn_im2col_bilinear<__half>(const __half*& bottomDa w1 = 
__hadd(w1, w2); w3 = __hadd(w3, w4); - const __half val = __hadd(w1, w3); + __half const val = __hadd(w1, w3); #else __half w1 = __float2half((__half2float(hh) * __half2float(hw)) * __half2float(v1)); __half w2 = __float2half((__half2float(hh) * __half2float(lw)) * __half2float(v2)); @@ -171,7 +171,7 @@ __device__ __half ms_deform_attn_im2col_bilinear<__half>(const __half*& bottomDa w1 = __float2half(__half2float(w1) + __half2float(w2)); w3 = __float2half(__half2float(w3) + __half2float(w4)); - const __half val = __float2half(__half2float(w1) + __half2float(w3)); + __half const val = __float2half(__half2float(w1) + __half2float(w3)); #endif return val; } @@ -373,9 +373,9 @@ __global__ void ms_deformable_im2col_gpu_kernel(int32_t const n, scalar_t const* } template <> -__global__ void ms_deformable_im2col_gpu_kernel<__half>(int32_t const n, const __half* dataValue, - int32_t const* dataSpatialShapes, int32_t const* dataLevelStartIndex, const __half* dataSamplingLoc, - const __half* dataAttnWeight, int32_t const batchSize, int32_t const spatialSize, int32_t const numHeads, int32_t const channels, +__global__ void ms_deformable_im2col_gpu_kernel<__half>(int32_t const n, __half const* dataValue, + int32_t const* dataSpatialShapes, int32_t const* dataLevelStartIndex, __half const* dataSamplingLoc, + __half const* dataAttnWeight, int32_t const batchSize, int32_t const spatialSize, int32_t const numHeads, int32_t const channels, int32_t const numLevels, int32_t const numQuery, int32_t const numPoint, __half* dataCol) { CUDA_KERNEL_LOOP(index, n) @@ -394,9 +394,9 @@ __global__ void ms_deformable_im2col_gpu_kernel<__half>(int32_t const n, const _ int32_t dataLocWPtr = dataWeightPtr << 1; int32_t const qidStride = numHeads * channels; int32_t const dataValuePtrInitOffset = bCol * spatialSize * qidStride; - const __half kZERO_POINT_FIVE = __float2half(0.5f); - const __half kMINUS_ONE = __float2half(-1.0f); - const __half kZERO = __int2half_rz(0); + __half const kZERO_POINT_FIVE = __float2half(0.5f); + __half const kMINUS_ONE = __float2half(-1.0f); + __half const kZERO = __int2half_rz(0); __half tpVal = kZERO; __half col = kZERO; @@ -406,17 +406,17 @@ __global__ void ms_deformable_im2col_gpu_kernel<__half>(int32_t const n, const _ int32_t const spatialHPtr = lCol << 1; int32_t const spatialH = dataSpatialShapes[spatialHPtr]; int32_t const spatialW = dataSpatialShapes[spatialHPtr + 1]; - const __half spatialHHalf = __int2half_rd(spatialH); - const __half spatialWHalf = __int2half_rd(spatialW); - const __half* dataValuePtr = dataValue + (dataValuePtrInitOffset + levelStartId * qidStride); + __half const spatialHHalf = __int2half_rd(spatialH); + __half const spatialWHalf = __int2half_rd(spatialW); + __half const* dataValuePtr = dataValue + (dataValuePtrInitOffset + levelStartId * qidStride); for (int32_t pCol = 0; pCol < numPoint; ++pCol) { - const __half locW = dataSamplingLoc[dataLocWPtr]; - const __half locH = dataSamplingLoc[dataLocWPtr + 1]; - const __half weight = dataAttnWeight[dataWeightPtr]; + __half const locW = dataSamplingLoc[dataLocWPtr]; + __half const locH = dataSamplingLoc[dataLocWPtr + 1]; + __half const weight = dataAttnWeight[dataWeightPtr]; #if __CUDA_ARCH__ >= 530 - const __half hIm = __hsub(__hmul(locH, spatialHHalf), kZERO_POINT_FIVE); - const __half wIm = __hsub(__hmul(locW, spatialWHalf), kZERO_POINT_FIVE); + __half const hIm = __hsub(__hmul(locH, spatialHHalf), kZERO_POINT_FIVE); + __half const wIm = __hsub(__hmul(locW, spatialWHalf), kZERO_POINT_FIVE); if (__hgt(hIm, 
kMINUS_ONE) && __hgt(wIm, kMINUS_ONE) && __hlt(hIm, spatialHHalf) && __hlt(wIm, spatialWHalf)) @@ -426,8 +426,8 @@ __global__ void ms_deformable_im2col_gpu_kernel<__half>(int32_t const n, const _ col = __hadd(col, __hmul(tpVal, weight)); } #else - const __half hIm = __float2half(__half2float(locH) * __half2float(spatialHHalf) - __half2float(kZERO_POINT_FIVE)); - const __half wIm = __float2half(__half2float(locW) * __half2float(spatialWHalf) - __half2float(kZERO_POINT_FIVE)); + __half const hIm = __float2half(__half2float(locH) * __half2float(spatialHHalf) - __half2float(kZERO_POINT_FIVE)); + __half const wIm = __float2half(__half2float(locW) * __half2float(spatialWHalf) - __half2float(kZERO_POINT_FIVE)); if((__half2float(hIm)>__half2float(kMINUS_ONE)) && (__half2float(wIm)>__half2float(kMINUS_ONE)) && (__half2float(hIm)<__half2float(spatialHHalf)) && (__half2float(wIm)<__half2float(spatialWHalf))) @@ -996,7 +996,7 @@ void ms_deformable_im2col_cuda(cudaStream_t stream, scalar_t const* dataValue, i { int32_t const numKernels = batchSize * numQuery * numHeads * channels; int32_t const numActualKernels = batchSize * numQuery * numHeads * channels; - int32_t const numThreads = CUDA_NUM_THREADS; + int32_t const numThreads = kCUDA_NUM_THREADS; cudaError_t err = cudaSuccess; ms_deformable_im2col_gpu_kernel<<>>( @@ -1016,7 +1016,7 @@ void ms_deformable_col2im_cuda(cudaStream_t stream, scalar_t const* grad_col, sc int32_t const numLevels, int32_t const numQuery, int32_t const numPoint, scalar_t* gradValue, scalar_t* gradSamplingLoc, scalar_t* gradAttnWeight) { - int32_t const numThreads = (channels > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : channels; + int32_t const numThreads = (channels > kCUDA_NUM_THREADS) ? kCUDA_NUM_THREADS : channels; int32_t const numKernels = batchSize * numQuery * numHeads * channels; int32_t const numActualKernels = batchSize * numQuery * numHeads * channels; if (channels > 1024) @@ -1157,7 +1157,7 @@ __global__ void float2half_input(int32_t const nData1, int32_t const nData2, int } } -__global__ void half2float_output(int32_t const n_data, const __half* data_half, float* data_float) +__global__ void half2float_output(int32_t const n_data, __half const* data_half, float* data_float) { CUDA_KERNEL_LOOP(index, n_data) { diff --git a/plugin/nmsPlugin/nmsPlugin.cpp b/plugin/nmsPlugin/nmsPlugin.cpp index 40f00ccc..14a5e2eb 100644 --- a/plugin/nmsPlugin/nmsPlugin.cpp +++ b/plugin/nmsPlugin/nmsPlugin.cpp @@ -54,7 +54,7 @@ DetectionOutputDynamic::DetectionOutputDynamic(DetectionOutputParameters params) { } -DetectionOutput::DetectionOutput(DetectionOutputParameters params, int C1, int C2, int numPriors) +DetectionOutput::DetectionOutput(DetectionOutputParameters params, int32_t C1, int32_t C2, int32_t numPriors) : param(params) , C1(C1) , C2(C2) @@ -64,7 +64,8 @@ DetectionOutput::DetectionOutput(DetectionOutputParameters params, int C1, int C { } -DetectionOutputDynamic::DetectionOutputDynamic(DetectionOutputParameters params, int C1, int C2, int numPriors) +DetectionOutputDynamic::DetectionOutputDynamic( + DetectionOutputParameters params, int32_t C1, int32_t C2, int32_t numPriors) : param(params) , C1(C1) , C2(C2) @@ -81,12 +82,12 @@ DetectionOutput::DetectionOutput(void const* data, size_t length) param = read(d); // Channel size of the locData tensor // numPriors * numLocClasses * 4 - C1 = read(d); + C1 = read(d); // Channel size of the confData tensor // numPriors * param.numClasses - C2 = read(d); + C2 = read(d); // Number of bounding boxes per sample - numPriors = read(d); + 
numPriors = read(d); // data type of this plugin mType = read(d); // mScoreBits @@ -100,12 +101,12 @@ DetectionOutputDynamic::DetectionOutputDynamic(void const* data, size_t length) param = read(d); // Channel size of the locData tensor // numPriors * numLocClasses * 4 - C1 = read(d); + C1 = read(d); // Channel size of the confData tensor // numPriors * param.numClasses - C2 = read(d); + C2 = read(d); // Number of bounding boxes per sample - numPriors = read(d); + numPriors = read(d); // data type of this plugin mType = read(d); // mScoreBits @@ -113,24 +114,24 @@ DetectionOutputDynamic::DetectionOutputDynamic(void const* data, size_t length) PLUGIN_VALIDATE(d == a + length); } -int DetectionOutput::getNbOutputs() const noexcept +int32_t DetectionOutput::getNbOutputs() const noexcept { // Plugin layer has 2 outputs return 2; } -int DetectionOutputDynamic::getNbOutputs() const noexcept +int32_t DetectionOutputDynamic::getNbOutputs() const noexcept { // Plugin layer has 2 outputs return 2; } -int DetectionOutput::initialize() noexcept +int32_t DetectionOutput::initialize() noexcept { return STATUS_SUCCESS; } -int DetectionOutputDynamic::initialize() noexcept +int32_t DetectionOutputDynamic::initialize() noexcept { return STATUS_SUCCESS; } @@ -140,7 +141,7 @@ void DetectionOutput::terminate() noexcept {} void DetectionOutputDynamic::terminate() noexcept {} // Returns output dimensions at given index -Dims DetectionOutput::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims DetectionOutput::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(nbInputDims == 3); PLUGIN_ASSERT(index == 0 || index == 1); @@ -155,7 +156,7 @@ Dims DetectionOutput::getOutputDimensions(int index, Dims const* inputs, int nbI } DimsExprs DetectionOutputDynamic::getOutputDimensions( - int outputIndex, DimsExprs const* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept + int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) noexcept { PLUGIN_ASSERT(nbInputs == 3); PLUGIN_ASSERT(outputIndex >= 0 && outputIndex < this->getNbOutputs()); @@ -165,8 +166,8 @@ DimsExprs DetectionOutputDynamic::getOutputDimensions( PLUGIN_ASSERT(inputs[1].nbDims == 4); // prior data PLUGIN_ASSERT(inputs[2].nbDims == 4); - int const C1_idx = param.inputOrder[0]; - int const C2_idx = param.inputOrder[1]; + int32_t const C1_idx = param.inputOrder[0]; + int32_t const C2_idx = param.inputOrder[1]; if (inputs[C1_idx].d[0]->isConstant() && inputs[C1_idx].d[1]->isConstant() && inputs[C1_idx].d[2]->isConstant() && inputs[C1_idx].d[3]->isConstant()) { @@ -207,22 +208,22 @@ DimsExprs DetectionOutputDynamic::getOutputDimensions( } // Returns the workspace size -size_t DetectionOutput::getWorkspaceSize(int maxBatchSize) const noexcept +size_t DetectionOutput::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return detectionInferenceWorkspaceSize( param.shareLocation, maxBatchSize, C1, C2, param.numClasses, numPriors, param.topK, mType, mType); } size_t DetectionOutputDynamic::getWorkspaceSize( - PluginTensorDesc const* inputs, int nbInputs, PluginTensorDesc const* outputs, int nbOutputs) const noexcept + PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return detectionInferenceWorkspaceSize( param.shareLocation, inputs[0].dims.d[0], C1, C2, param.numClasses, numPriors, param.topK, mType, mType); } // Plugin layer implementation -int DetectionOutput::enqueue( - 
int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t DetectionOutput::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { // Input order {loc, conf, prior} void const* const locData = inputs[param.inputOrder[0]]; @@ -263,13 +264,13 @@ int32_t DetectionOutputDynamic::enqueue(PluginTensorDesc const* inputDesc, Plugi size_t DetectionOutput::getSerializationSize() const noexcept { // DetectionOutputParameters, C1, C2, numPriors, mType, mScoreBits - return sizeof(DetectionOutputParameters) + sizeof(int) * 3 + sizeof(DataType) + sizeof(int32_t); + return sizeof(DetectionOutputParameters) + sizeof(int32_t) * 3 + sizeof(DataType) + sizeof(int32_t); } size_t DetectionOutputDynamic::getSerializationSize() const noexcept { // DetectionOutputParameters, C1, C2, numPriors, mType, mScoreBits - return sizeof(DetectionOutputParameters) + sizeof(int) * 3 + sizeof(DataType) + sizeof(int32_t); + return sizeof(DetectionOutputParameters) + sizeof(int32_t) * 3 + sizeof(DataType) + sizeof(int32_t); } // Serialization of plugin parameters @@ -304,7 +305,7 @@ bool DetectionOutput::supportsFormat(DataType type, PluginFormat format) const n } bool DetectionOutputDynamic::supportsFormatCombination( - int pos, PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { // 3 inputs, 2 outputs, so 5 input/output in total PLUGIN_ASSERT(0 <= pos && pos < 5); @@ -437,7 +438,7 @@ char const* DetectionOutputDynamic::getPluginNamespace() const noexcept // Return the DataType of the plugin output at the requested index. DataType DetectionOutput::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Two outputs PLUGIN_ASSERT(index == 0 || index == 1); @@ -448,12 +449,12 @@ DataType DetectionOutput::getOutputDataType( return inputTypes[0]; } // keepCount: use kFLOAT instead as they have same sizeof(type) - PLUGIN_ASSERT(sizeof(int) == sizeof(float)); + PLUGIN_ASSERT(sizeof(int32_t) == sizeof(float)); return DataType::kFLOAT; } DataType DetectionOutputDynamic::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Two outputs PLUGIN_ASSERT(index == 0 || index == 1); @@ -464,19 +465,19 @@ DataType DetectionOutputDynamic::getOutputDataType( return inputTypes[0]; } // keepCount: use kFLOAT instead as they have same sizeof(type) - PLUGIN_ASSERT(sizeof(int) == sizeof(float)); + PLUGIN_ASSERT(sizeof(int32_t) == sizeof(float)); return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool DetectionOutput::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
-bool DetectionOutput::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool DetectionOutput::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } @@ -489,21 +490,21 @@ bool DetectionOutput::canBroadcastInputAcrossBatch(int inputIndex) const noexcep // type: DataType configuration for the plugin layer // format: format NCHW, NHWC etc // maxbatchSize: maximum batch size for the plugin layer -void DetectionOutput::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, - DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept +void DetectionOutput::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, + int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(nbInputs == 3); PLUGIN_ASSERT(nbOutputs == 2); // Verify all the input dimensions - for (int i = 0; i < nbInputs; i++) + for (int32_t i = 0; i < nbInputs; i++) { PLUGIN_ASSERT(inputDims[i].nbDims == 3); } // Verify all the output dimensions - for (int i = 0; i < nbOutputs; i++) + for (int32_t i = 0; i < nbOutputs; i++) { PLUGIN_ASSERT(outputDims[i].nbDims == 3); } @@ -513,9 +514,9 @@ void DetectionOutput::configurePlugin(Dims const* inputDims, int nbInputs, Dims C1 = inputDims[param.inputOrder[0]].d[0]; C2 = inputDims[param.inputOrder[1]].d[0]; - int const nbBoxCoordinates = 4; + int32_t const nbBoxCoordinates = 4; numPriors = inputDims[param.inputOrder[2]].d[1] / nbBoxCoordinates; - int const numLocClasses = param.shareLocation ? 1 : param.numClasses; + int32_t const numLocClasses = param.shareLocation ? 1 : param.numClasses; // Verify C1 PLUGIN_ASSERT(numPriors * numLocClasses * nbBoxCoordinates == inputDims[param.inputOrder[0]].d[0]); @@ -528,19 +529,19 @@ void DetectionOutput::configurePlugin(Dims const* inputDims, int nbInputs, Dims } void DetectionOutputDynamic::configurePlugin( - DynamicPluginTensorDesc const* in, int nbInputs, DynamicPluginTensorDesc const* out, int nbOutputs) noexcept + DynamicPluginTensorDesc const* in, int32_t nbInputs, DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { PLUGIN_ASSERT(nbInputs == 3); PLUGIN_ASSERT(nbOutputs == 2); // Verify all the input dimensions - for (int i = 0; i < nbInputs; i++) + for (int32_t i = 0; i < nbInputs; i++) { PLUGIN_ASSERT(in[i].desc.dims.nbDims == 4); } // Verify all the output dimensions - for (int i = 0; i < nbOutputs; i++) + for (int32_t i = 0; i < nbOutputs; i++) { PLUGIN_ASSERT(out[i].desc.dims.nbDims == 4); } @@ -550,9 +551,9 @@ void DetectionOutputDynamic::configurePlugin( C1 = in[param.inputOrder[0]].desc.dims.d[1]; C2 = in[param.inputOrder[1]].desc.dims.d[1]; - int const nbBoxCoordinates = 4; + int32_t const nbBoxCoordinates = 4; numPriors = in[param.inputOrder[2]].desc.dims.d[2] / nbBoxCoordinates; - int const numLocClasses = param.shareLocation ? 1 : param.numClasses; + int32_t const numLocClasses = param.shareLocation ? 
1 : param.numClasses; // Verify C1 PLUGIN_ASSERT(numPriors * numLocClasses * nbBoxCoordinates == in[param.inputOrder[0]].desc.dims.d[1]); @@ -639,38 +640,38 @@ IPluginV2Ext* NMSPluginCreator::createPlugin(char const* name, PluginFieldCollec mScoreBits = 16; // Read configurations from each fields - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "shareLocation")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.shareLocation = static_cast(*(static_cast(fields[i].data))); + params.shareLocation = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "varianceEncodedInTarget")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.varianceEncodedInTarget = static_cast(*(static_cast(fields[i].data))); + params.varianceEncodedInTarget = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "backgroundLabelId")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.backgroundLabelId = static_cast(*(static_cast(fields[i].data))); + params.backgroundLabelId = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "numClasses")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.numClasses = static_cast(*(static_cast(fields[i].data))); + params.numClasses = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "topK")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.topK = static_cast(*(static_cast(fields[i].data))); + params.topK = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "keepTopK")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.keepTopK = static_cast(*(static_cast(fields[i].data))); + params.keepTopK = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "confidenceThreshold")) { @@ -684,18 +685,18 @@ IPluginV2Ext* NMSPluginCreator::createPlugin(char const* name, PluginFieldCollec } else if (!strcmp(attrName, "confSigmoid")) { - params.confSigmoid = static_cast(*(static_cast(fields[i].data))); + params.confSigmoid = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "isNormalized")) { - params.isNormalized = static_cast(*(static_cast(fields[i].data))); + params.isNormalized = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "inputOrder")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - int const size = fields[i].length; - int const* o = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + int32_t const size = fields[i].length; + int32_t const* o = static_cast(fields[i].data); + for (int32_t j = 0; j < size; j++) { params.inputOrder[j] = *o; o++; @@ -704,7 +705,7 @@ IPluginV2Ext* NMSPluginCreator::createPlugin(char const* name, PluginFieldCollec else if (!strcmp(attrName, "codeType")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.codeType = static_cast(*(static_cast(fields[i].data))); + params.codeType = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "scoreBits")) { @@ -714,7 +715,7 @@ IPluginV2Ext* NMSPluginCreator::createPlugin(char const* name, PluginFieldCollec else if (!strcmp(attrName, "isBatchAgnostic")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.isBatchAgnostic = static_cast(*(static_cast(fields[i].data))); + params.isBatchAgnostic = static_cast(*(static_cast(fields[i].data))); } } @@ 
-744,38 +745,38 @@ IPluginV2DynamicExt* NMSDynamicPluginCreator::createPlugin(char const* name, Plu mScoreBits = 16; // Read configurations from each fields - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "shareLocation")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.shareLocation = static_cast(*(static_cast(fields[i].data))); + params.shareLocation = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "varianceEncodedInTarget")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.varianceEncodedInTarget = static_cast(*(static_cast(fields[i].data))); + params.varianceEncodedInTarget = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "backgroundLabelId")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.backgroundLabelId = static_cast(*(static_cast(fields[i].data))); + params.backgroundLabelId = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "numClasses")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.numClasses = static_cast(*(static_cast(fields[i].data))); + params.numClasses = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "topK")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.topK = static_cast(*(static_cast(fields[i].data))); + params.topK = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "keepTopK")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.keepTopK = static_cast(*(static_cast(fields[i].data))); + params.keepTopK = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "confidenceThreshold")) { @@ -789,18 +790,18 @@ IPluginV2DynamicExt* NMSDynamicPluginCreator::createPlugin(char const* name, Plu } else if (!strcmp(attrName, "confSigmoid")) { - params.confSigmoid = static_cast(*(static_cast(fields[i].data))); + params.confSigmoid = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "isNormalized")) { - params.isNormalized = static_cast(*(static_cast(fields[i].data))); + params.isNormalized = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "inputOrder")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - int const size = fields[i].length; - int const* o = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + int32_t const size = fields[i].length; + int32_t const* o = static_cast(fields[i].data); + for (int32_t j = 0; j < size; j++) { params.inputOrder[j] = *o; o++; @@ -809,7 +810,7 @@ IPluginV2DynamicExt* NMSDynamicPluginCreator::createPlugin(char const* name, Plu else if (!strcmp(attrName, "codeType")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.codeType = static_cast(*(static_cast(fields[i].data))); + params.codeType = static_cast(*(static_cast(fields[i].data))); } else if (!strcmp(attrName, "scoreBits")) { diff --git a/plugin/nmsPlugin/nmsPlugin.h b/plugin/nmsPlugin/nmsPlugin.h index 2075aeb5..70d3d5ba 100644 --- a/plugin/nmsPlugin/nmsPlugin.h +++ b/plugin/nmsPlugin/nmsPlugin.h @@ -32,23 +32,23 @@ class DetectionOutput : public IPluginV2Ext public: DetectionOutput(DetectionOutputParameters param); - DetectionOutput(DetectionOutputParameters param, int C1, int C2, int numPriors); + DetectionOutput(DetectionOutputParameters param, int32_t C1, int32_t C2, int32_t numPriors); DetectionOutput(void const* 
data, size_t length); ~DetectionOutput() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -69,19 +69,20 @@ class DetectionOutput : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; @@ -89,7 +90,7 @@ class DetectionOutput : public IPluginV2Ext private: DetectionOutputParameters param; - int C1, C2, numPriors; + int32_t C1, C2, numPriors; DataType mType; int32_t mScoreBits; std::string mPluginNamespace; @@ -99,15 +100,15 @@ class DetectionOutputDynamic : public IPluginV2DynamicExt { public: DetectionOutputDynamic(DetectionOutputParameters param); - DetectionOutputDynamic(DetectionOutputParameters param, int C1, int C2, int numPriors); + DetectionOutputDynamic(DetectionOutputParameters param, int32_t C1, int32_t C2, int32_t numPriors); DetectionOutputDynamic(void const* data, size_t length); ~DetectionOutputDynamic() override = default; // IPluginV2 methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; @@ -117,24 +118,25 @@ class DetectionOutputDynamic : public IPluginV2DynamicExt void setScoreBits(int32_t scoreBits) 
noexcept; // IPluginV2Ext methods - DataType getOutputDataType(int index, nvinfer1::DataType const* inputType, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputType, int32_t nbInputs) const noexcept override; // IPluginV2DynamicExt methods IPluginV2DynamicExt* clone() const noexcept override; DimsExprs getOutputDimensions( - int outputIndex, DimsExprs const* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept override; + int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) noexcept override; bool supportsFormatCombination( - int pos, PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(DynamicPluginTensorDesc const* in, int nbInputs, DynamicPluginTensorDesc const* out, - int nbOutputs) noexcept override; - size_t getWorkspaceSize(PluginTensorDesc const* inputs, int nbInputs, PluginTensorDesc const* outputs, - int nbOutputs) const noexcept override; + int32_t pos, PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(DynamicPluginTensorDesc const* in, int32_t nbInputs, DynamicPluginTensorDesc const* out, + int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, + int32_t nbOutputs) const noexcept override; int32_t enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; private: DetectionOutputParameters param; - int C1, C2, numPriors; + int32_t C1, C2, numPriors; DataType mType; int32_t mScoreBits; std::string mPluginNamespace; diff --git a/plugin/normalizePlugin/normalizePlugin.cpp b/plugin/normalizePlugin/normalizePlugin.cpp index 86640406..b400f35a 100644 --- a/plugin/normalizePlugin/normalizePlugin.cpp +++ b/plugin/normalizePlugin/normalizePlugin.cpp @@ -35,7 +35,7 @@ char const* const kNORMALIZE_PLUGIN_NAME{"Normalize_TRT"}; PluginFieldCollection NormalizePluginCreator::mFC{}; std::vector NormalizePluginCreator::mPluginAttributes; -Normalize::Normalize(Weights const* weights, int nbWeights, bool acrossSpatial, bool channelShared, float eps) +Normalize::Normalize(Weights const* weights, int32_t nbWeights, bool acrossSpatial, bool channelShared, float eps) : acrossSpatial(acrossSpatial) , channelShared(channelShared) , eps(eps) @@ -47,8 +47,8 @@ Normalize::Normalize(Weights const* weights, int nbWeights, bool acrossSpatial, mScalarScale = static_cast(weights[0].values)[0]; } -Normalize::Normalize(Weights const* weights, int nbWeights, float scalarScale, bool acrossSpatial, bool channelShared, - float eps, int C, int H, int W) +Normalize::Normalize(Weights const* weights, int32_t nbWeights, float scalarScale, bool acrossSpatial, + bool channelShared, float eps, int32_t C, int32_t H, int32_t W) : mScalarScale(scalarScale) , acrossSpatial(acrossSpatial) , channelShared(channelShared) @@ -67,27 +67,27 @@ Normalize::Normalize(void const* buffer, size_t length) { char const* d = static_cast(buffer); char const* a = d; - C = read(d); - H = read(d); - W = read(d); + C = read(d); + H = read(d); + W = read(d); acrossSpatial = read(d); channelShared = read(d); eps = read(d); - mNbWeights = read(d); - int count = read(d); + mNbWeights = read(d); + int32_t count = read(d); std::memcpy(&mScalarScale, d, sizeof(float)); mWeights = deserializeToDevice(d, count); 
PLUGIN_VALIDATE(d == a + length); } -int Normalize::getNbOutputs() const noexcept +int32_t Normalize::getNbOutputs() const noexcept { // Plugin layer has 1 output return 1; } -Dims Normalize::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims Normalize::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(nbInputDims == 1); PLUGIN_ASSERT(index == 0); @@ -95,20 +95,20 @@ Dims Normalize::getOutputDimensions(int index, Dims const* inputs, int nbInputDi return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } -int Normalize::initialize() noexcept +int32_t Normalize::initialize() noexcept { return STATUS_SUCCESS; } void Normalize::terminate() noexcept {} -size_t Normalize::getWorkspaceSize(int maxBatchSize) const noexcept +size_t Normalize::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return normalizePluginWorkspaceSize(acrossSpatial, C, H, W); } -int Normalize::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t Normalize::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { void const* inputData = inputs[0]; void* outputData = outputs[0]; @@ -133,7 +133,8 @@ int Normalize::enqueue( size_t Normalize::getSerializationSize() const noexcept { // C,H,W, acrossSpatial,channelShared, eps, mWeights.count,mWeights.values - return sizeof(int) * 3 + sizeof(bool) * 2 + sizeof(float) + sizeof(int) * 2 + mWeights.count * sizeof(float); + return sizeof(int32_t) * 3 + sizeof(bool) * 2 + sizeof(float) + sizeof(int32_t) * 2 + + mWeights.count * sizeof(float); } void Normalize::serialize(void* buffer) const noexcept @@ -145,8 +146,8 @@ void Normalize::serialize(void* buffer) const noexcept write(d, acrossSpatial); write(d, channelShared); write(d, eps); - write(d, (int) mNbWeights); - write(d, (int) mWeights.count); + write(d, (int32_t) mNbWeights); + write(d, (int32_t) mWeights.count); serializeFromDevice(d, mWeights); PLUGIN_ASSERT(d == a + getSerializationSize()); @@ -191,7 +192,8 @@ char const* Normalize::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index -DataType Normalize::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType Normalize::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { PLUGIN_ASSERT(index == 0); return DataType::kFLOAT; @@ -199,21 +201,21 @@ DataType Normalize::getOutputDataType(int index, nvinfer1::DataType const* input // Return true if output tensor is broadcast across a batch. bool Normalize::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool Normalize::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool Normalize::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. 
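For reference (not part of the diff): the plugin-creator changes in this patch (NMSPluginCreator above, NormalizePluginCreator just below) all follow one pattern — attributes declared as PluginFieldType::kINT32 are now read through int32_t pointers rather than int pointers. A minimal sketch of that field-parsing loop, using a hypothetical two-attribute plugin rather than the full Normalize/NMS attribute sets and the standard NvInfer plugin-field types:

#include <cstdint>
#include <cstring>

#include "NvInferRuntime.h" // PluginField, PluginFieldCollection, PluginFieldType

struct ToyParams
{
    int32_t nbWeights{};
    float eps{};
};

ToyParams parseToyFields(nvinfer1::PluginFieldCollection const* fc)
{
    using nvinfer1::PluginField;
    using nvinfer1::PluginFieldType;

    ToyParams params{};
    PluginField const* fields = fc->fields;
    for (int32_t i = 0; i < fc->nbFields; ++i)
    {
        char const* attrName = fields[i].name;
        if (!strcmp(attrName, "nbWeights") && fields[i].type == PluginFieldType::kINT32)
        {
            // kINT32 fields carry 32-bit integers, so read them through an int32_t pointer.
            params.nbWeights = *(static_cast<int32_t const*>(fields[i].data));
        }
        else if (!strcmp(attrName, "eps") && fields[i].type == PluginFieldType::kFLOAT32)
        {
            params.eps = *(static_cast<float const*>(fields[i].data));
        }
    }
    return params;
}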
-void Normalize::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void Normalize::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(*inputTypes == DataType::kFLOAT && floatFormat == PluginFormat::kLINEAR); C = inputDims[0].d[0]; @@ -314,13 +316,13 @@ IPluginV2Ext* NormalizePluginCreator::createPlugin(char const* name, PluginField { std::vector weightValues; PluginField const* fields = fc->fields; - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "nbWeights")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - mNbWeights = *(static_cast(fields[i].data)); + mNbWeights = *(static_cast(fields[i].data)); } else if (!strcmp(attrName, "acrossSpatial")) { @@ -340,10 +342,10 @@ IPluginV2Ext* NormalizePluginCreator::createPlugin(char const* name, PluginField else if (!strcmp(attrName, "weights")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kFLOAT32); - int size = fields[i].length; + int32_t size = fields[i].length; weightValues.reserve(size); auto const* w = static_cast(fields[i].data); - for (int j = 0; j < size; j++) + for (int32_t j = 0; j < size; j++) { weightValues.push_back(*w); w++; diff --git a/plugin/normalizePlugin/normalizePlugin.h b/plugin/normalizePlugin/normalizePlugin.h index f41ff478..622b44d6 100644 --- a/plugin/normalizePlugin/normalizePlugin.h +++ b/plugin/normalizePlugin/normalizePlugin.h @@ -31,26 +31,26 @@ namespace plugin class Normalize : public IPluginV2Ext { public: - Normalize(Weights const* weights, int nbWeights, bool acrossSpatial, bool channelShared, float eps); + Normalize(Weights const* weights, int32_t nbWeights, bool acrossSpatial, bool channelShared, float eps); - Normalize(Weights const* weights, int nbWeights, float scalarScale, bool acrossSpatial, bool channelShared, - float eps, int C, int H, int W); + Normalize(Weights const* weights, int32_t nbWeights, float scalarScale, bool acrossSpatial, bool channelShared, + float eps, int32_t C, int32_t H, int32_t W); Normalize(void const* buffer, size_t length); ~Normalize() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -71,19 +71,20 @@ class Normalize : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const 
noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; @@ -95,14 +96,14 @@ class Normalize : public IPluginV2Ext cublasHandle_t mCublas; Weights mWeights{}; // mWeights.values is on the device - int mNbWeights{}; + int32_t mNbWeights{}; float mScalarScale{}; // keep track of scale on the host (for when channelShared is true) bool acrossSpatial{}; bool channelShared{}; float eps{}; - int C{}; - int H{}; - int W{}; + int32_t C{}; + int32_t H{}; + int32_t W{}; std::string mPluginNamespace; }; @@ -128,7 +129,7 @@ class NormalizePluginCreator : public nvinfer1::pluginInternal::BaseCreator bool mAcrossSpatial{}; bool mChannelShared{}; float mEps{}; - int mNbWeights{}; + int32_t mNbWeights{}; static std::vector mPluginAttributes; }; } // namespace plugin diff --git a/plugin/nvFasterRCNN/nvFasterRCNNPlugin.cpp b/plugin/nvFasterRCNN/nvFasterRCNNPlugin.cpp index 89d976ee..85b2dee4 100644 --- a/plugin/nvFasterRCNN/nvFasterRCNNPlugin.cpp +++ b/plugin/nvFasterRCNN/nvFasterRCNNPlugin.cpp @@ -132,7 +132,7 @@ RPROIPlugin::~RPROIPlugin() } } -int RPROIPlugin::initialize() noexcept +int32_t RPROIPlugin::initialize() noexcept { return STATUS_SUCCESS; } @@ -146,12 +146,12 @@ size_t RPROIPlugin::getSmemSize() const noexcept return prop.sharedMemPerBlockOptin; } -int RPROIPlugin::getNbOutputs() const noexcept +int32_t RPROIPlugin::getNbOutputs() const noexcept { return 2; } -Dims RPROIPlugin::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims RPROIPlugin::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(index >= 0 && index < 2); PLUGIN_ASSERT(nbInputDims == 4); @@ -165,13 +165,13 @@ Dims RPROIPlugin::getOutputDimensions(int index, Dims const* inputs, int nbInput return Dims4(params.nmsMaxOut, inputs[2].d[0], params.poolingH, params.poolingW); } -size_t RPROIPlugin::getWorkspaceSize(int maxBatchSize) const noexcept +size_t RPROIPlugin::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return RPROIInferenceFusedWorkspaceSize(maxBatchSize, A, H, W, params.nmsMaxOut); } -int RPROIPlugin::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t RPROIPlugin::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { // Bounding box (region proposal) objectness scores. 
void const* const scores = inputs[0]; @@ -230,7 +230,7 @@ void RPROIPlugin::serialize(void* buffer) const noexcept PLUGIN_ASSERT(d == a + getSerializationSize()); } -float* RPROIPlugin::copyToHost(void const* srcHostData, int count) noexcept +float* RPROIPlugin::copyToHost(void const* srcHostData, int32_t count) noexcept { float* dstHostPtr = nullptr; PLUGIN_CHECK(cudaMallocHost(&dstHostPtr, count * sizeof(float))); @@ -238,7 +238,7 @@ float* RPROIPlugin::copyToHost(void const* srcHostData, int count) noexcept return dstHostPtr; } -int RPROIPlugin::copyFromHost(char* dstHostBuffer, void const* source, int count) const noexcept +int32_t RPROIPlugin::copyFromHost(char* dstHostBuffer, void const* source, int32_t count) const noexcept { PLUGIN_CHECK(cudaMemcpy(dstHostBuffer, source, count * sizeof(float), cudaMemcpyHostToHost)); return count * sizeof(float); @@ -318,7 +318,8 @@ char const* RPROIPlugin::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index. -DataType RPROIPlugin::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType RPROIPlugin::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Two outputs PLUGIN_ASSERT(index == 0 || index == 1); @@ -327,13 +328,13 @@ DataType RPROIPlugin::getOutputDataType(int index, nvinfer1::DataType const* inp // Return true if output tensor is broadcast across a batch. bool RPROIPlugin::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
-bool RPROIPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept
+bool RPROIPlugin::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept
 {
     return false;
 }
@@ -432,45 +433,45 @@ IPluginV2Ext* RPROIPluginCreator::createPlugin(char const* name, PluginFieldColl
     try
     {
         PluginField const* fields = fc->fields;
-        int nbFields = fc->nbFields;
+        int32_t nbFields = fc->nbFields;

-        for (int i = 0; i < nbFields; ++i)
+        for (int32_t i = 0; i < nbFields; ++i)
         {
             char const* attrName = fields[i].name;
             if (!strcmp(attrName, "poolingH"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.poolingH = *(static_cast<int const*>(fields[i].data));
+                params.poolingH = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "poolingW"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.poolingW = *(static_cast<int const*>(fields[i].data));
+                params.poolingW = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "featureStride"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.featureStride = *(static_cast<int const*>(fields[i].data));
+                params.featureStride = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "preNmsTop"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.preNmsTop = *(static_cast<int const*>(fields[i].data));
+                params.preNmsTop = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "nmsMaxOut"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.nmsMaxOut = *(static_cast<int const*>(fields[i].data));
+                params.nmsMaxOut = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "anchorsRatioCount"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.anchorsRatioCount = *(static_cast<int const*>(fields[i].data));
+                params.anchorsRatioCount = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "anchorsScaleCount"))
             {
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32);
-                params.anchorsScaleCount = *(static_cast<int const*>(fields[i].data));
+                params.anchorsScaleCount = *(static_cast<int32_t const*>(fields[i].data));
             }
             if (!strcmp(attrName, "iouThreshold"))
             {
@@ -492,7 +493,7 @@ IPluginV2Ext* RPROIPluginCreator::createPlugin(char const* name, PluginFieldColl
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kFLOAT32);
                 anchorsRatios.reserve(params.anchorsRatioCount);
                 float const* ratios = static_cast<float const*>(fields[i].data);
-                for (int j = 0; j < params.anchorsRatioCount; ++j)
+                for (int32_t j = 0; j < params.anchorsRatioCount; ++j)
                 {
                     anchorsRatios.push_back(*ratios);
                     ratios++;
@@ -503,7 +504,7 @@ IPluginV2Ext* RPROIPluginCreator::createPlugin(char const* name, PluginFieldColl
                 PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kFLOAT32);
                 anchorsScales.reserve(params.anchorsScaleCount);
                 float const* scales = static_cast<float const*>(fields[i].data);
-                for (int j = 0; j < params.anchorsScaleCount; ++j)
+                for (int32_t j = 0; j < params.anchorsScaleCount; ++j)
                 {
                     anchorsScales.push_back(*scales);
                     scales++;
diff --git a/plugin/nvFasterRCNN/nvFasterRCNNPlugin.h b/plugin/nvFasterRCNN/nvFasterRCNNPlugin.h
index 4e05582c..30f3c099 100644
--- a/plugin/nvFasterRCNN/nvFasterRCNNPlugin.h
+++ b/plugin/nvFasterRCNN/nvFasterRCNNPlugin.h
@@ -40,17 +40,17 @@ class RPROIPlugin : public IPluginV2IOExt
     ~RPROIPlugin() override;

-    int getNbOutputs() const noexcept override;
+    int32_t getNbOutputs() const noexcept override;

-    Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override;
+    Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override;

-    int initialize() noexcept override;
+    int32_t initialize()
noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -72,12 +72,13 @@ class RPROIPlugin : public IPluginV2IOExt char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; @@ -89,9 +90,9 @@ class RPROIPlugin : public IPluginV2IOExt private: void deserialize(int8_t const* data, size_t length); - float* copyToHost(void const* srcHostData, int count) noexcept; + float* copyToHost(void const* srcHostData, int32_t count) noexcept; - int copyFromHost(char* dstHostBuffer, void const* source, int count) const noexcept; + int32_t copyFromHost(char* dstHostBuffer, void const* source, int32_t count) const noexcept; size_t getSmemSize() const noexcept; diff --git a/plugin/pillarScatterPlugin/pillarScatter.cpp b/plugin/pillarScatterPlugin/pillarScatter.cpp index 93f162df..f1a0aa16 100644 --- a/plugin/pillarScatterPlugin/pillarScatter.cpp +++ b/plugin/pillarScatterPlugin/pillarScatter.cpp @@ -59,8 +59,8 @@ nvinfer1::IPluginV2DynamicExt* PillarScatterPlugin::clone() const noexcept return nullptr; } -nvinfer1::DimsExprs PillarScatterPlugin::getOutputDimensions( - int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept +nvinfer1::DimsExprs PillarScatterPlugin::getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, + int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { PLUGIN_ASSERT(outputIndex == 0); nvinfer1::DimsExprs output; @@ -74,7 +74,7 @@ nvinfer1::DimsExprs PillarScatterPlugin::getOutputDimensions( } bool PillarScatterPlugin::supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { PLUGIN_ASSERT(nbInputs == 3); PLUGIN_ASSERT(nbOutputs == 1); @@ -99,36 +99,36 @@ bool PillarScatterPlugin::supportsFormatCombination( return false; } -void PillarScatterPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept +void PillarScatterPlugin::configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept { return; } -size_t PillarScatterPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int 
nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept +size_t PillarScatterPlugin::getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept { return 0; } -int PillarScatterPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, +int32_t PillarScatterPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { try { - int batchSize = inputDesc[0].dims.d[0]; - int maxPillarNum = inputDesc[0].dims.d[1]; - int numFeatures = inputDesc[0].dims.d[2]; + int32_t batchSize = inputDesc[0].dims.d[0]; + int32_t maxPillarNum = inputDesc[0].dims.d[1]; + int32_t numFeatures = inputDesc[0].dims.d[2]; nvinfer1::DataType inputType = inputDesc[0].type; - auto coords_data = static_cast(inputs[1]); - auto params_data = static_cast(inputs[2]); + auto coords_data = static_cast(inputs[1]); + auto params_data = static_cast(inputs[2]); - unsigned int featureY = feature_y_size_; - unsigned int featureX = feature_x_size_; + uint32_t featureY = feature_y_size_; + uint32_t featureX = feature_x_size_; - int status = -1; + int32_t status = -1; if (inputType == nvinfer1::DataType::kHALF) { @@ -159,7 +159,7 @@ int PillarScatterPlugin::enqueue(nvinfer1::PluginTensorDesc const* inputDesc, } nvinfer1::DataType PillarScatterPlugin::getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { return inputTypes[0]; } @@ -174,12 +174,12 @@ char const* PillarScatterPlugin::getPluginVersion() const noexcept return kPLUGIN_VERSION; } -int PillarScatterPlugin::getNbOutputs() const noexcept +int32_t PillarScatterPlugin::getNbOutputs() const noexcept { return 1; } -int PillarScatterPlugin::initialize() noexcept +int32_t PillarScatterPlugin::initialize() noexcept { return 0; } diff --git a/plugin/pillarScatterPlugin/pillarScatter.h b/plugin/pillarScatterPlugin/pillarScatter.h index 690f6974..cdaf0454 100644 --- a/plugin/pillarScatterPlugin/pillarScatter.h +++ b/plugin/pillarScatterPlugin/pillarScatter.h @@ -37,24 +37,24 @@ class PillarScatterPlugin : public nvinfer1::IPluginV2DynamicExt PillarScatterPlugin(size_t h, size_t w); // IPluginV2DynamicExt Methods nvinfer1::IPluginV2DynamicExt* clone() const noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, + nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int nbInputs, - nvinfer1::PluginTensorDesc const* outputs, int nbOutputs) const noexcept override; - int enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t 
nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* inputs, int32_t nbInputs, + nvinfer1::PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; + int32_t enqueue(nvinfer1::PluginTensorDesc const* inputDesc, nvinfer1::PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; // IPluginV2Ext Methods nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; // IPluginV2 Methods char const* getPluginType() const noexcept override; char const* getPluginVersion() const noexcept override; - int getNbOutputs() const noexcept override; - int initialize() noexcept override; + int32_t getNbOutputs() const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; diff --git a/plugin/proposalLayerPlugin/proposalLayerPlugin.cpp b/plugin/proposalLayerPlugin/proposalLayerPlugin.cpp index f384a8f3..1335ea66 100644 --- a/plugin/proposalLayerPlugin/proposalLayerPlugin.cpp +++ b/plugin/proposalLayerPlugin/proposalLayerPlugin.cpp @@ -117,14 +117,16 @@ IPluginV2Ext* ProposalLayerPluginCreator::deserializePlugin(char const* name, vo return nullptr; } -ProposalLayer::ProposalLayer(int prenms_topk, int keep_topk, float iou_threshold, nvinfer1::Dims const& imageSize) +ProposalLayer::ProposalLayer( + int32_t prenms_topk, int32_t keep_topk, float iou_threshold, nvinfer1::Dims const& imageSize) : mPreNMSTopK(prenms_topk) , mKeepTopK(keep_topk) , mIOUThreshold(iou_threshold) , mImageSize(imageSize) { mBackgroundLabel = -1; - PLUGIN_VALIDATE(mPreNMSTopK > 0 && mPreNMSTopK <= 1024); + PLUGIN_VALIDATE(mPreNMSTopK > 0); + PLUGIN_VALIDATE(mPreNMSTopK <= 1024); PLUGIN_VALIDATE(mKeepTopK > 0); PLUGIN_VALIDATE(iou_threshold > 0.0F); PLUGIN_VALIDATE(mImageSize.nbDims == 3); @@ -141,26 +143,26 @@ ProposalLayer::ProposalLayer(int prenms_topk, int keep_topk, float iou_threshold generate_pyramid_anchors(imageSize); } -int ProposalLayer::getNbOutputs() const noexcept +int32_t ProposalLayer::getNbOutputs() const noexcept { return 1; } -int ProposalLayer::initialize() noexcept +int32_t ProposalLayer::initialize() noexcept { // Init the mValidCnt of max batch size - std::vector tempValidCnt(mMaxBatchSize, mPreNMSTopK); + std::vector tempValidCnt(mMaxBatchSize, mPreNMSTopK); - mValidCnt = std::make_shared>(mMaxBatchSize); + mValidCnt = std::make_shared>(mMaxBatchSize); - PLUGIN_CUASSERT(cudaMemcpy( - mValidCnt->mPtr, static_cast(tempValidCnt.data()), sizeof(int) * mMaxBatchSize, cudaMemcpyHostToDevice)); + PLUGIN_CUASSERT(cudaMemcpy(mValidCnt->mPtr, static_cast(tempValidCnt.data()), + sizeof(int32_t) * mMaxBatchSize, cudaMemcpyHostToDevice)); // Init the anchors for batch size: mAnchorBoxesDevice = std::make_shared>(mAnchorsCnt * 4 * mMaxBatchSize); - int batch_offset = sizeof(float) * mAnchorsCnt * 4; + int32_t batch_offset = sizeof(float) * mAnchorsCnt * 4; uint8_t* device_ptr = static_cast(mAnchorBoxesDevice->mPtr); - for (int i = 0; i < mMaxBatchSize; i++) + for (int32_t i = 0; i < mMaxBatchSize; i++) { PLUGIN_CUASSERT(cudaMemcpy(static_cast(device_ptr + i * batch_offset), static_cast(mAnchorBoxesHost.data()), 
batch_offset, cudaMemcpyHostToDevice)); @@ -218,7 +220,7 @@ char const* ProposalLayer::getPluginNamespace() const noexcept size_t ProposalLayer::getSerializationSize() const noexcept { - return sizeof(int) * 2 + sizeof(float) + sizeof(int) * 2 + sizeof(nvinfer1::Dims); + return sizeof(int32_t) * 2 + sizeof(float) + sizeof(int32_t) * 2 + sizeof(nvinfer1::Dims); } void ProposalLayer::serialize(void* buffer) const noexcept @@ -265,7 +267,7 @@ void ProposalLayer::deserialize(int8_t const* data, size_t length) generate_pyramid_anchors(mImageSize); } -void ProposalLayer::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) +void ProposalLayer::check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) { // object_score[N, anchors, 2, 1], // foreground_delta[N, anchors, 4, 1], @@ -277,14 +279,14 @@ void ProposalLayer::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInput PLUGIN_ASSERT(inputs[1].nbDims == 3 && inputs[1].d[1] == 4); } -size_t ProposalLayer::getWorkspaceSize(int batch_size) const noexcept +size_t ProposalLayer::getWorkspaceSize(int32_t batch_size) const noexcept { ProposalWorkSpace proposal(batch_size, mAnchorsCnt, mPreNMSTopK, mParam, mType); return proposal.totalSize; } -Dims ProposalLayer::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims ProposalLayer::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { check_valid_inputs(inputs, nbInputDims); @@ -312,10 +314,10 @@ void ProposalLayer::generate_pyramid_anchors(nvinfer1::Dims const& imageDims) for (size_t s = 0; s < scales.size(); ++s) { float scale = scales[s]; - int stride = strides[s]; + int32_t stride = strides[s]; - for (int y = 0; y < imageDims.d[1]; y += anchor_stride * stride) - for (int x = 0; x < imageDims.d[2]; x += anchor_stride * stride) + for (int32_t y = 0; y < imageDims.d[1]; y += anchor_stride * stride) + for (int32_t x = 0; x < imageDims.d[2]; x += anchor_stride * stride) for (float r : ratios) { float sqrt_r = sqrt(r); @@ -330,8 +332,8 @@ void ProposalLayer::generate_pyramid_anchors(nvinfer1::Dims const& imageDims) PLUGIN_VALIDATE(anchors.size() % 4 == 0); } -int ProposalLayer::enqueue( - int batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t ProposalLayer::enqueue( + int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { void* proposals = outputs[0]; @@ -352,7 +354,8 @@ int ProposalLayer::enqueue( } // Return the DataType of the plugin output at the requested index -DataType ProposalLayer::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType ProposalLayer::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only DataType::kFLOAT is acceptable by the plugin layer return DataType::kFLOAT; @@ -360,27 +363,27 @@ DataType ProposalLayer::getOutputDataType(int index, nvinfer1::DataType const* i // Return true if output tensor is broadcast across a batch. bool ProposalLayer::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
-bool ProposalLayer::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool ProposalLayer::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. -void ProposalLayer::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void ProposalLayer::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { check_valid_inputs(inputDims, nbInputs); PLUGIN_ASSERT(inputDims[0].d[0] == inputDims[1].d[0]); mAnchorsCnt = inputDims[0].d[0]; - PLUGIN_ASSERT(mAnchorsCnt == (int) (mAnchorBoxesHost.size() / 4)); + PLUGIN_ASSERT(mAnchorsCnt == (int32_t) (mAnchorBoxesHost.size() / 4)); mMaxBatchSize = maxBatchSize; } diff --git a/plugin/proposalLayerPlugin/proposalLayerPlugin.h b/plugin/proposalLayerPlugin/proposalLayerPlugin.h index d6880d00..68a0d136 100644 --- a/plugin/proposalLayerPlugin/proposalLayerPlugin.h +++ b/plugin/proposalLayerPlugin/proposalLayerPlugin.h @@ -34,33 +34,33 @@ namespace plugin class ProposalLayer : public IPluginV2Ext { public: - ProposalLayer(int prenms_topk, int keep_topk, float iou_threshold, nvinfer1::Dims const& image_size); + ProposalLayer(int32_t prenms_topk, int32_t keep_topk, float iou_threshold, nvinfer1::Dims const& image_size); ProposalLayer(void const* data, size_t length); ~ProposalLayer() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; void destroy() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batch_size, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; void serialize(void* buffer) const noexcept override; - // void configureWithFormat(const Dims* inputs, int nbInputs, const Dims* outputDims, int nbOutputs, - // nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override; + // void configureWithFormat(const Dims* inputs, int32_t nbInputs, const Dims* outputDims, int32_t nbOutputs, + // nvinfer1::DataType type, nvinfer1::PluginFormat format, int32_t maxBatchSize) override; bool supportsFormat(DataType type, PluginFormat format) const noexcept override; @@ -74,25 +74,26 @@ class ProposalLayer : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int 
nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: void deserialize(int8_t const* data, size_t length); - void check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims); + void check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims); void generate_pyramid_anchors(nvinfer1::Dims const& imageDims); int32_t mBackgroundLabel{}; @@ -133,8 +134,8 @@ class ProposalLayerPluginCreator : public nvinfer1::pluginInternal::BaseCreator private: static PluginFieldCollection mFC; - int mPreNMSTopK; - int mKeepTopK; + int32_t mPreNMSTopK; + int32_t mKeepTopK; float mIOUThreshold; static std::vector mPluginAttributes; }; diff --git a/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.cpp b/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.cpp index 86ca4482..486377c0 100644 --- a/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.cpp +++ b/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.cpp @@ -132,11 +132,6 @@ IPluginV2Ext* PyramidROIAlignPluginCreator::createPlugin(char const* name, Plugi PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); samplingRatio = *(static_cast(fields[i].data)); PLUGIN_VALIDATE(samplingRatio >= 0); - } - if (!strcmp(attrName, "legacy")) - { - PLUGIN_ASSERT(fields[i].type == PluginFieldType::kINT32); - legacy = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "legacy")) { @@ -202,7 +197,7 @@ void PyramidROIAlign::destroy() noexcept delete this; } -size_t PyramidROIAlign::getWorkspaceSize(int) const noexcept +size_t PyramidROIAlign::getWorkspaceSize(int32_t) const noexcept { return 0; } @@ -290,7 +285,6 @@ Dims PyramidROIAlign::getOutputDimensions(int32_t index, Dims const* inputs, int int32_t PyramidROIAlign::enqueue( int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept - { void* const pooled = outputs[0]; cudaError_t status; @@ -331,18 +325,18 @@ int32_t PyramidROIAlign::enqueue( size_t PyramidROIAlign::getSerializationSize() const noexcept { - return sizeof(int) * 2 // mPooledSize - + sizeof(int) * 2 // mImageSize - + sizeof(int) // mFeatureLength - + sizeof(int) // mROICount - + sizeof(int) // mFPNScale - + sizeof(int) // mTransformCoords - + sizeof(bool) // mAbsCoords - + sizeof(bool) // mSwapCoords - + sizeof(bool) // mPlusOneCoords - + sizeof(int) // mSamplingRatio - + sizeof(bool) // mIsLegacy - + sizeof(int) * 8; // mFeatureSpatialSize + return sizeof(int32_t) * 2 // mPooledSize + + sizeof(int32_t) * 2 // mImageSize + + sizeof(int32_t) // mFeatureLength + + sizeof(int32_t) // mROICount + + sizeof(int32_t) // mFPNScale + + sizeof(int32_t) // mTransformCoords + + sizeof(bool) // 
mAbsCoords + + sizeof(bool) // mSwapCoords + + sizeof(bool) // mPlusOneCoords + + sizeof(int32_t) // mSamplingRatio + + sizeof(bool) // mIsLegacy + + sizeof(int32_t) * 8; // mFeatureSpatialSize } void PyramidROIAlign::serialize(void* buffer) const noexcept diff --git a/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.h b/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.h index feca0b7b..dde6a309 100644 --- a/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.h +++ b/plugin/pyramidROIAlignPlugin/pyramidROIAlignPlugin.h @@ -52,7 +52,7 @@ class PyramidROIAlign : public IPluginV2Ext void destroy() noexcept override; - size_t getWorkspaceSize(int) const noexcept override; + size_t getWorkspaceSize(int32_t) const noexcept override; int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; diff --git a/plugin/regionPlugin/regionPlugin.cpp b/plugin/regionPlugin/regionPlugin.cpp index 61e2c889..c6f709eb 100644 --- a/plugin/regionPlugin/regionPlugin.cpp +++ b/plugin/regionPlugin/regionPlugin.cpp @@ -37,7 +37,7 @@ void safeFree(T* ptr) } template -void allocateChunk(T*& ptr, int count) +void allocateChunk(T*& ptr, int32_t count) { ptr = static_cast(malloc(count * sizeof(T))); } @@ -55,7 +55,7 @@ struct SoftmaxTreeDeleter safeFree(smTree->group); if (smTree->name) { - for (int i = 0; i < smTree->n; i++) + for (int32_t i = 0; i < smTree->n; i++) { safeFree(smTree->name[i]); } @@ -83,7 +83,7 @@ Region::Region(RegionParameters params) { } -Region::Region(RegionParameters params, int C, int H, int W) +Region::Region(RegionParameters params, int32_t C, int32_t H, int32_t W) : num(params.num) , coords(params.coords) , classes(params.classes) @@ -97,12 +97,12 @@ Region::Region(RegionParameters params, int C, int H, int W) Region::Region(void const* buffer, size_t length) { char const *d = reinterpret_cast(buffer), *a = d; - C = read(d); - H = read(d); - W = read(d); - num = read(d); - classes = read(d); - coords = read(d); + C = read(d); + H = read(d); + W = read(d); + num = read(d); + classes = read(d); + coords = read(d); bool softmaxTreePresent = read(d); bool leafPresent = read(d); bool parentPresent = read(d); @@ -117,7 +117,7 @@ Region::Region(void const* buffer, size_t length) // need to read each element individually allocateChunk(smTreeTemp, 1); - smTreeTemp->n = read(d); + smTreeTemp->n = read(d); if (leafPresent) { @@ -152,23 +152,23 @@ Region::Region(void const* buffer, size_t length) smTreeTemp->group = nullptr; } - for (int i = 0; i < smTreeTemp->n; i++) + for (int32_t i = 0; i < smTreeTemp->n; i++) { if (leafPresent) { - smTreeTemp->leaf[i] = read(d); + smTreeTemp->leaf[i] = read(d); } if (parentPresent) { - smTreeTemp->parent[i] = read(d); + smTreeTemp->parent[i] = read(d); } if (childPresent) { - smTreeTemp->child[i] = read(d); + smTreeTemp->child[i] = read(d); } if (groupPresent) { - smTreeTemp->group[i] = read(d); + smTreeTemp->group[i] = read(d); } } @@ -183,17 +183,17 @@ Region::Region(void const* buffer, size_t length) if (namePresent) { - for (int i = 0; i < smTreeTemp->n; i++) + for (int32_t i = 0; i < smTreeTemp->n; i++) { allocateChunk(smTreeTemp->name[i], 256); - for (int j = 0; j < 256; j++) + for (int32_t j = 0; j < 256; j++) { smTreeTemp->name[i][j] = read(d); } } } - smTreeTemp->groups = read(d); + smTreeTemp->groups = read(d); if (groupSizePresent) { allocateChunk(smTreeTemp->groupSize, smTreeTemp->groups); @@ -210,15 +210,15 @@ Region::Region(void const* buffer, size_t length) { 
smTreeTemp->groupOffset = nullptr; } - for (int i = 0; i < smTreeTemp->groups; i++) + for (int32_t i = 0; i < smTreeTemp->groups; i++) { if (groupSizePresent) { - smTreeTemp->groupSize[i] = read(d); + smTreeTemp->groupSize[i] = read(d); } if (groupOffsetPresent) { - smTreeTemp->groupOffset[i] = read(d); + smTreeTemp->groupOffset[i] = read(d); } } smTree = std::shared_ptr(smTreeTemp, SoftmaxTreeDeleter()); @@ -230,20 +230,20 @@ Region::Region(void const* buffer, size_t length) PLUGIN_VALIDATE(d == a + length); } -int Region::getNbOutputs() const noexcept +int32_t Region::getNbOutputs() const noexcept { return 1; } -Dims Region::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept +Dims Region::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept { PLUGIN_ASSERT(nbInputDims == 1); PLUGIN_ASSERT(index == 0); return inputs[0]; } -int Region::enqueue( - int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t Region::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { void const* inputData = inputs[0]; void* outputData = outputs[0]; @@ -263,26 +263,26 @@ int Region::enqueue( size_t Region::getSerializationSize() const noexcept { // C, H, W, num, classes, coords, smTree !nullptr and other array members !nullptr, softmaxTree members - size_t count = 6 * sizeof(int) + 8 * sizeof(bool); + size_t count = 6 * sizeof(int32_t) + 8 * sizeof(bool); if (smTree.get()) { - count += 2 * sizeof(int); + count += 2 * sizeof(int32_t); if (smTree->leaf) { - count += smTree->n * sizeof(int); + count += smTree->n * sizeof(int32_t); } if (smTree->parent) { - count += smTree->n * sizeof(int); + count += smTree->n * sizeof(int32_t); } if (smTree->child) { - count += smTree->n * sizeof(int); + count += smTree->n * sizeof(int32_t); } if (smTree->group) { - count += smTree->n * sizeof(int); + count += smTree->n * sizeof(int32_t); } if (smTree->name) { @@ -290,11 +290,11 @@ size_t Region::getSerializationSize() const noexcept } if (smTree->groupSize) { - count += smTree->groups * sizeof(int); + count += smTree->groups * sizeof(int32_t); } if (smTree->groupOffset) { - count += smTree->groups * sizeof(int); + count += smTree->groups * sizeof(int32_t); } } return count; @@ -321,7 +321,7 @@ void Region::serialize(void* buffer) const noexcept if (smTree) { write(d, smTree->n); - for (int i = 0; i < smTree->n; i++) + for (int32_t i = 0; i < smTree->n; i++) { if (smTree->leaf) { @@ -342,17 +342,17 @@ void Region::serialize(void* buffer) const noexcept } if (smTree->name) { - for (int i = 0; i < smTree->n; i++) + for (int32_t i = 0; i < smTree->n; i++) { char const* str = smTree->name[i]; - for (int j = 0; j < 256; j++) + for (int32_t j = 0; j < 256; j++) { write(d, str[j]); } } } write(d, smTree->groups); - for (int i = 0; i < smTree->groups; i++) + for (int32_t i = 0; i < smTree->groups; i++) { if (smTree->groupSize) { @@ -372,7 +372,7 @@ bool Region::supportsFormat(DataType type, PluginFormat format) const noexcept return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } -int Region::initialize() noexcept +int32_t Region::initialize() noexcept { return STATUS_SUCCESS; } @@ -389,7 +389,7 @@ char const* Region::getPluginVersion() const noexcept return kREGION_PLUGIN_VERSION; } -size_t Region::getWorkspaceSize(int maxBatchSize) const noexcept +size_t Region::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return 0; 
} @@ -429,28 +429,29 @@ char const* Region::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index -DataType Region::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType Region::getOutputDataType(int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { PLUGIN_ASSERT(index == 0); return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. -bool Region::isOutputBroadcastAcrossBatch(int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept +bool Region::isOutputBroadcastAcrossBatch( + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool Region::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool Region::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. -void Region::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void Region::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(*inputTypes == DataType::kFLOAT && floatFormat == PluginFormat::kLINEAR); PLUGIN_ASSERT(nbInputs == 1); @@ -507,23 +508,23 @@ IPluginV2Ext* RegionPluginCreator::createPlugin(char const* name, PluginFieldCol try { PluginField const* fields = fc->fields; - for (int i = 0; i < fc->nbFields; ++i) + for (int32_t i = 0; i < fc->nbFields; ++i) { char const* attrName = fields[i].name; if (!strcmp(attrName, "num")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.num = *(static_cast(fields[i].data)); + params.num = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "coords")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.coords = *(static_cast(fields[i].data)); + params.coords = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "classes")) { PLUGIN_VALIDATE(fields[i].type == PluginFieldType::kINT32); - params.classes = *(static_cast(fields[i].data)); + params.classes = *(static_cast(fields[i].data)); } if (!strcmp(attrName, "smTree")) { diff --git a/plugin/regionPlugin/regionPlugin.h b/plugin/regionPlugin/regionPlugin.h index 9ec4cc33..7af234f1 100644 --- a/plugin/regionPlugin/regionPlugin.h +++ b/plugin/regionPlugin/regionPlugin.h @@ -32,23 +32,23 @@ class Region : public IPluginV2Ext public: Region(RegionParameters params); - Region(RegionParameters params, int C, int H, int W); + Region(RegionParameters params, int32_t C, int32_t H, int32_t W); Region(void const* buffer, size_t length); ~Region() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept 
override;
+    size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override;

-    int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace,
+    int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace,
         cudaStream_t stream) noexcept override;

     size_t getSerializationSize() const noexcept override;
@@ -69,19 +69,20 @@ class Region : public IPluginV2Ext

     char const* getPluginNamespace() const noexcept override;

-    DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override;
+    DataType getOutputDataType(
+        int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override;

     bool isOutputBroadcastAcrossBatch(
-        int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override;
+        int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override;

-    bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override;
+    bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override;

     void attachToContext(
         cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override;

-    void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs,
+    void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs,
         DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast,
-        bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override;
+        bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override;

     void detachFromContext() noexcept override;
@@ -91,11 +92,11 @@ class Region : public IPluginV2Ext
     }

 private:
-    int num;
-    int coords;
-    int classes;
+    int32_t num;
+    int32_t coords;
+    int32_t classes;
     std::shared_ptr<softmaxTree> smTree;
-    int C, H, W;
+    int32_t C, H, W;
     bool hasSoftmaxTree;
     std::string mPluginNamespace;
 };
diff --git a/plugin/reorgPlugin/reorgPlugin.cpp b/plugin/reorgPlugin/reorgPlugin.cpp
index 15bd98f0..b1d1ff21 100644
--- a/plugin/reorgPlugin/reorgPlugin.cpp
+++ b/plugin/reorgPlugin/reorgPlugin.cpp
@@ -25,7 +25,7 @@ static char const* const kREORG_PLUGIN_NAME{"Reorg_TRT"};
 PluginFieldCollection ReorgPluginCreator::mFC{};
 std::vector<PluginField> ReorgPluginCreator::mPluginAttributes;

-Reorg::Reorg(int C, int H, int W, int stride)
+Reorg::Reorg(int32_t C, int32_t H, int32_t W, int32_t stride)
     : C(C)
     , H(H)
     , W(W)
@@ -33,7 +33,7 @@
-Reorg::Reorg(int stride)
+Reorg::Reorg(int32_t stride)
     : stride(stride)
 {
 }
@@ -41,27 +41,27 @@ Reorg::Reorg(int stride)
 Reorg::Reorg(void const* buffer, size_t length)
 {
     char const *d = reinterpret_cast<char const*>(buffer), *a = d;
-    C = read<int>(d);
-    H = read<int>(d);
-    W = read<int>(d);
-    stride = read<int>(d);
+    C = read<int32_t>(d);
+    H = read<int32_t>(d);
+    W = read<int32_t>(d);
+    stride = read<int32_t>(d);
     PLUGIN_VALIDATE(d == a + length);
 }

-int Reorg::getNbOutputs() const noexcept
+int32_t Reorg::getNbOutputs() const noexcept
 {
     return 1;
 }

-Dims Reorg::getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept
+Dims Reorg::getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept
 {
     PLUGIN_ASSERT(nbInputDims == 1);
     PLUGIN_ASSERT(index == 0);
     return Dims3(inputs[0].d[0] * stride * stride, inputs[0].d[1] / stride, inputs[0].d[2] / stride);
 }

-int Reorg::enqueue(
-    int batchSize, void const* const* inputs, void* const* outputs, void*
workspace, cudaStream_t stream) noexcept +int32_t Reorg::enqueue( + int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { void const* inputData = inputs[0]; void* outputData = outputs[0]; @@ -72,7 +72,7 @@ int Reorg::enqueue( size_t Reorg::getSerializationSize() const noexcept { // C, H, W, stride - return sizeof(int) * 4; + return sizeof(int32_t) * 4; } void Reorg::serialize(void* buffer) const noexcept @@ -90,14 +90,14 @@ bool Reorg::supportsFormat(DataType type, PluginFormat format) const noexcept return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } -int Reorg::initialize() noexcept +int32_t Reorg::initialize() noexcept { return STATUS_SUCCESS; } void Reorg::terminate() noexcept {} -size_t Reorg::getWorkspaceSize(int maxBatchSize) const noexcept +size_t Reorg::getWorkspaceSize(int32_t maxBatchSize) const noexcept { return 0; } @@ -129,7 +129,7 @@ char const* Reorg::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index -DataType Reorg::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType Reorg::getOutputDataType(int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only 1 input and 1 output from the plugin layer PLUGIN_ASSERT(index == 0); @@ -139,21 +139,22 @@ DataType Reorg::getOutputDataType(int index, nvinfer1::DataType const* inputType } // Return true if output tensor is broadcast across a batch. -bool Reorg::isOutputBroadcastAcrossBatch(int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept +bool Reorg::isOutputBroadcastAcrossBatch( + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool Reorg::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool Reorg::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. 
-void Reorg::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void Reorg::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(*inputTypes == DataType::kFLOAT && floatFormat == PluginFormat::kLINEAR); PLUGIN_ASSERT(nbInputs == 1); diff --git a/plugin/reorgPlugin/reorgPlugin.h b/plugin/reorgPlugin/reorgPlugin.h index 405c2808..f0f999b5 100644 --- a/plugin/reorgPlugin/reorgPlugin.h +++ b/plugin/reorgPlugin/reorgPlugin.h @@ -29,25 +29,25 @@ namespace plugin class Reorg : public IPluginV2Ext { public: - Reorg(int stride); + Reorg(int32_t stride); - Reorg(int C, int H, int W, int stride); + Reorg(int32_t C, int32_t H, int32_t W, int32_t stride); Reorg(void const* buffer, size_t length); ~Reorg() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - size_t getWorkspaceSize(int maxBatchSize) const noexcept override; + size_t getWorkspaceSize(int32_t maxBatchSize) const noexcept override; - int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -68,25 +68,26 @@ class Reorg : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: - int C{}, H{}, W{}; - int stride{}; + int32_t C{}, H{}, W{}; + int32_t stride{}; std::string mPluginNamespace; }; @@ -109,7 +110,7 @@ class ReorgPluginCreator : public nvinfer1::pluginInternal::BaseCreator 
private: static PluginFieldCollection mFC; - int stride{}; + int32_t stride{}; static std::vector mPluginAttributes; }; } // namespace plugin diff --git a/plugin/resizeNearestPlugin/resizeNearestPlugin.cpp b/plugin/resizeNearestPlugin/resizeNearestPlugin.cpp index 22f56ffb..3f88a17b 100644 --- a/plugin/resizeNearestPlugin/resizeNearestPlugin.cpp +++ b/plugin/resizeNearestPlugin/resizeNearestPlugin.cpp @@ -103,23 +103,23 @@ ResizeNearest::ResizeNearest(float scale) PLUGIN_VALIDATE(mScale > 0); } -int ResizeNearest::getNbOutputs() const noexcept +int32_t ResizeNearest::getNbOutputs() const noexcept { return 1; } -Dims ResizeNearest::getOutputDimensions(int index, Dims const* inputDims, int nbInputs) noexcept +Dims ResizeNearest::getOutputDimensions(int32_t index, Dims const* inputDims, int32_t nbInputs) noexcept { PLUGIN_ASSERT(nbInputs == 1); nvinfer1::Dims const& input = inputDims[0]; PLUGIN_ASSERT(index == 0); nvinfer1::Dims output{}; output.nbDims = input.nbDims; - for (int d = 0; d < input.nbDims; ++d) + for (int32_t d = 0; d < input.nbDims; ++d) { if (d == input.nbDims - 2 || d == input.nbDims - 1) { - output.d[d] = int(input.d[d] * mScale); + output.d[d] = int32_t(input.d[d] * mScale); } else { @@ -129,7 +129,7 @@ Dims ResizeNearest::getOutputDimensions(int index, Dims const* inputDims, int nb return output; } -int ResizeNearest::initialize() noexcept +int32_t ResizeNearest::initialize() noexcept { return 0; } @@ -141,7 +141,7 @@ void ResizeNearest::destroy() noexcept delete this; } -size_t ResizeNearest::getWorkspaceSize(int) const noexcept +size_t ResizeNearest::getWorkspaceSize(int32_t) const noexcept { return 0; } @@ -149,7 +149,7 @@ size_t ResizeNearest::getWorkspaceSize(int) const noexcept size_t ResizeNearest::getSerializationSize() const noexcept { // scale, dimensions: 3 * 2 - return sizeof(float) + sizeof(int) * 3 * 2; + return sizeof(float) + sizeof(int32_t) * 3 * 2; } void ResizeNearest::serialize(void* buffer) const noexcept @@ -225,17 +225,17 @@ bool ResizeNearest::supportsFormat(DataType type, PluginFormat format) const noe return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } -int ResizeNearest::enqueue( - int batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t ResizeNearest::enqueue( + int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { - int nchan = mOutputDims.d[0]; + int32_t nchan = mOutputDims.d[0]; float scale = mScale; int2 osize = {mOutputDims.d[2], mOutputDims.d[1]}; - int istride = mInputDims.d[2]; - int ostride = mOutputDims.d[2]; - int ibatchstride = mInputDims.d[1] * istride; - int obatchstride = mOutputDims.d[1] * ostride; + int32_t istride = mInputDims.d[2]; + int32_t ostride = mOutputDims.d[2]; + int32_t ibatchstride = mInputDims.d[1] * istride; + int32_t obatchstride = mOutputDims.d[1] * ostride; dim3 block(32, 16); dim3 grid((osize.x - 1) / block.x + 1, (osize.y - 1) / block.y + 1, std::min(batch_size * nchan, 65535)); @@ -246,7 +246,8 @@ int ResizeNearest::enqueue( } // Return the DataType of the plugin output at the requested index -DataType ResizeNearest::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType ResizeNearest::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { // Only 1 input and 1 output from the plugin layer PLUGIN_ASSERT(index == 0); @@ -257,21 +258,21 @@ DataType 
ResizeNearest::getOutputDataType(int index, nvinfer1::DataType const* i // Return true if output tensor is broadcast across a batch. bool ResizeNearest::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool ResizeNearest::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool ResizeNearest::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. -void ResizeNearest::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void ResizeNearest::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(nbInputs == 1); mInputDims = inputDims[0]; diff --git a/plugin/resizeNearestPlugin/resizeNearestPlugin.h b/plugin/resizeNearestPlugin/resizeNearestPlugin.h index ed8ff38a..5db5fc49 100644 --- a/plugin/resizeNearestPlugin/resizeNearestPlugin.h +++ b/plugin/resizeNearestPlugin/resizeNearestPlugin.h @@ -39,19 +39,19 @@ class ResizeNearest : public IPluginV2Ext ~ResizeNearest() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; void destroy() noexcept override; - size_t getWorkspaceSize(int) const noexcept override; + size_t getWorkspaceSize(int32_t) const noexcept override; - int enqueue(int batch_size, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -70,19 +70,20 @@ class ResizeNearest : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType 
const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; diff --git a/plugin/scatterPlugin/scatterPlugin.cpp b/plugin/scatterPlugin/scatterPlugin.cpp index 3dbe44ae..694d4cfe 100644 --- a/plugin/scatterPlugin/scatterPlugin.cpp +++ b/plugin/scatterPlugin/scatterPlugin.cpp @@ -37,7 +37,7 @@ PluginFieldCollection ScatterNDPluginCreator::mFC{}; ScatterND::ScatterND() {} -int ScatterND::getNbOutputs() const noexcept +int32_t ScatterND::getNbOutputs() const noexcept { // Plugin layer has 1 output return 1; @@ -51,7 +51,7 @@ DimsExprs ScatterND::getOutputDimensions( return ret; } -int ScatterND::initialize() noexcept +int32_t ScatterND::initialize() noexcept { return 0; } @@ -90,7 +90,7 @@ void ScatterND::configurePlugin( int32_t ScatterND::calculateNumSlices(Dims indexTensorDims) const noexcept { int32_t nSlices = 1; - for (int i = 0; i < indexTensorDims.nbDims - 1; i++) + for (int32_t i = 0; i < indexTensorDims.nbDims - 1; i++) { nSlices *= indexTensorDims.d[i]; } @@ -106,7 +106,7 @@ size_t ScatterND::getWorkspaceSize( } void ScatterND::calculateTransformCoeff( - Dims const& dataTensorDims, int indexRank, int32_t* transformCoeff) const noexcept + Dims const& dataTensorDims, int32_t indexRank, int32_t* transformCoeff) const noexcept { std::vector pitches; for (int32_t i = indexRank - 1, nIndx = 1; i >= 0; i--) @@ -123,7 +123,7 @@ void ScatterND::calculateTransformCoeff( int32_t ScatterND::calculateCopySize(Dims const& dataDims) const noexcept { int32_t copySize = 1; - for (int i = 0; i < dataDims.nbDims; i++) + for (int32_t i = 0; i < dataDims.nbDims; i++) { copySize *= dataDims.d[i]; } @@ -158,7 +158,7 @@ int32_t ScatterND::enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc c case DataType::kFP8: PLUGIN_FAIL("FP8 not supported"); break; } - for (int i = indexRank; i < dataDims.nbDims; i++) + for (int32_t i = indexRank; i < dataDims.nbDims; i++) { rowSize *= dataDims.d[i]; } @@ -194,7 +194,8 @@ char const* ScatterND::getPluginNamespace() const noexcept } // Return the DataType of the plugin output at the requested index -DataType ScatterND::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType ScatterND::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept { PLUGIN_ASSERT(index == 0); return inputTypes[dataTensorIdx]; diff --git a/plugin/scatterPlugin/scatterPlugin.h b/plugin/scatterPlugin/scatterPlugin.h index d5b23c0e..3545aa57 100644 --- a/plugin/scatterPlugin/scatterPlugin.h +++ b/plugin/scatterPlugin/scatterPlugin.h @@ -35,7 +35,7 @@ class ScatterND : public IPluginV2DynamicExt ~ScatterND() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; DimsExprs getOutputDimensions( int32_t outputIndex, DimsExprs const* inputs, int32_t nbInputs, IExprBuilder& exprBuilder) noexcept override; @@ -49,7 +49,7 @@ class ScatterND : public IPluginV2DynamicExt size_t getWorkspaceSize(PluginTensorDesc const* inputs, int32_t nbInputs, PluginTensorDesc const* outputs, int32_t nbOutputs) const noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; @@ -72,7 +72,8 @@ class ScatterND : public 
IPluginV2DynamicExt char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; void attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; @@ -83,12 +84,12 @@ class ScatterND : public IPluginV2DynamicExt // calculate how many slices we need to scatter = reduce_mul(indexTensor.shape[:-1]) int32_t calculateNumSlices(Dims indexTensorDims) const noexcept; int32_t calculateCopySize(Dims const& dataDims) const noexcept; - void calculateTransformCoeff(Dims const& dataTensorDims, int indexRank, int32_t* transformCoeff) const noexcept; + void calculateTransformCoeff(Dims const& dataTensorDims, int32_t indexRank, int32_t* transformCoeff) const noexcept; std::string mPluginNamespace; - static constexpr int indexTensorIdx = 1; - static constexpr int updateTensorIdx = 2; - static constexpr int dataTensorIdx = 0; + static constexpr int32_t indexTensorIdx = 1; + static constexpr int32_t updateTensorIdx = 2; + static constexpr int32_t dataTensorIdx = 0; }; class ScatterNDPluginCreator : public nvinfer1::pluginInternal::BaseCreator diff --git a/plugin/skipLayerNormPlugin/CustomSkipLayerNormPluginDynamic_PluginConfig.yaml b/plugin/skipLayerNormPlugin/CustomSkipLayerNormPluginDynamic_PluginConfig.yaml index 3ae0de8f..a2f309c3 100644 --- a/plugin/skipLayerNormPlugin/CustomSkipLayerNormPluginDynamic_PluginConfig.yaml +++ b/plugin/skipLayerNormPlugin/CustomSkipLayerNormPluginDynamic_PluginConfig.yaml @@ -88,4 +88,38 @@ versions: - ld - beta - gamma + golden_reference_script: "plugin/skipLayerNormPlugin/CustomSkipLayerNormPluginDynamic_PluginReference.py" + abs_tol: 1e-2 + rel_tol: 1e-2 + configs: + config1: + input_types: + input: float32 + skip: float32 + attribute_options: + type_id: + value: 0 + ld: + value: 128 + beta: + shape: "1, 1, 128" + gamma: + shape: "1, 1, 128" + bias: + shape: "1, 1, 128" + config2: + input_types: + input: float16 + skip: float16 + attribute_options: + type_id: + value: 1 + ld: + value: 768 + beta: + shape: "1, 1, 768" + gamma: + shape: "1, 1, 768" + bias: + shape: "1, 1, 768" ... 
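The new golden_reference_script, abs_tol, and rel_tol fields above tie each plugin config to a framework-level reference that the plugin output is compared against within the stated tolerances. As a rough illustration only (the actual reference lives in CustomSkipLayerNormPluginDynamic_PluginReference.py; the epsilon value and helper names below are assumptions, not taken from that script), a skip-layer-norm reference shaped like config1 could look like:

import numpy as np

def skip_layer_norm_reference(inp, skip, gamma, beta, bias=None, eps=1e-5):
    # Normalize (input + skip [+ bias]) over the last (hidden) axis,
    # then apply the elementwise scale (gamma) and shift (beta).
    x = inp + skip
    if bias is not None:
        x = x + bias
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

# Shapes follow config1 above: float32 tensors with hidden size (ld) of 128.
rng = np.random.default_rng(0)
inp = rng.standard_normal((2, 8, 128)).astype(np.float32)
skip = rng.standard_normal((2, 8, 128)).astype(np.float32)
gamma = rng.standard_normal((1, 1, 128)).astype(np.float32)
beta = rng.standard_normal((1, 1, 128)).astype(np.float32)
ref = skip_layer_norm_reference(inp, skip, gamma, beta)
# The plugin output would then be checked against `ref` using the YAML tolerances,
# e.g. np.allclose(plugin_out, ref, atol=1e-2, rtol=1e-2).

The fp16 entry (config2) follows the same pattern with float16 inputs and ld = 768; the 1e-2 tolerances leave room for the reduced-precision path.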
diff --git a/plugin/skipLayerNormPlugin/skipLayerNormPlugin.cpp b/plugin/skipLayerNormPlugin/skipLayerNormPlugin.cpp index 37ab791a..d95f0df6 100644 --- a/plugin/skipLayerNormPlugin/skipLayerNormPlugin.cpp +++ b/plugin/skipLayerNormPlugin/skipLayerNormPlugin.cpp @@ -331,7 +331,7 @@ int32_t SkipLayerNormPluginDynamic::enqueue(PluginTensorDesc const* inputDesc, P else { PLUGIN_ERROR(("Unsupported type error, expected [kINT8,kHALF,kFLOAT], but received " - + std::to_string(static_cast(iType))) + + std::to_string(static_cast(iType))) .c_str()); } } @@ -857,7 +857,7 @@ int32_t SkipLayerNormVarSeqlenPlugin::enqueue(PluginTensorDesc const* inputDesc, else { PLUGIN_VALIDATE(("Unsupported type error, expected [kINT8,kHALF,kFLOAT], but received " - + std::to_string(static_cast(iType))) + + std::to_string(static_cast(iType))) .c_str()); } } diff --git a/plugin/specialSlicePlugin/specialSlicePlugin.cpp b/plugin/specialSlicePlugin/specialSlicePlugin.cpp index a8760027..c8721596 100644 --- a/plugin/specialSlicePlugin/specialSlicePlugin.cpp +++ b/plugin/specialSlicePlugin/specialSlicePlugin.cpp @@ -79,7 +79,7 @@ IPluginV2Ext* SpecialSlicePluginCreator::deserializePlugin(char const* name, voi return nullptr; } -size_t SpecialSlice::getWorkspaceSize(int) const noexcept +size_t SpecialSlice::getWorkspaceSize(int32_t) const noexcept { return 0; } @@ -126,7 +126,7 @@ char const* SpecialSlice::getPluginNamespace() const noexcept size_t SpecialSlice::getSerializationSize() const noexcept { - return sizeof(int); + return sizeof(int32_t); } void SpecialSlice::serialize(void* buffer) const noexcept @@ -139,23 +139,23 @@ void SpecialSlice::serialize(void* buffer) const noexcept SpecialSlice::SpecialSlice(void const* data, size_t length) { char const *d = reinterpret_cast(data), *a = d; - mBboxesCnt = read(d); + mBboxesCnt = read(d); PLUGIN_VALIDATE(d == a + length); } SpecialSlice::SpecialSlice() {} -int SpecialSlice::initialize() noexcept +int32_t SpecialSlice::initialize() noexcept { return 0; } -int SpecialSlice::getNbOutputs() const noexcept +int32_t SpecialSlice::getNbOutputs() const noexcept { return 1; } -void SpecialSlice::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims) +void SpecialSlice::check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims) { PLUGIN_ASSERT(nbInputDims == 1); @@ -163,7 +163,7 @@ void SpecialSlice::check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputD PLUGIN_ASSERT(inputs[0].nbDims == 2 && inputs[0].d[1] == 6); } -Dims SpecialSlice::getOutputDimensions(int index, Dims const* inputDims, int nbInputs) noexcept +Dims SpecialSlice::getOutputDimensions(int32_t index, Dims const* inputDims, int32_t nbInputs) noexcept { PLUGIN_ASSERT(index == 0); @@ -180,8 +180,8 @@ Dims SpecialSlice::getOutputDimensions(int index, Dims const* inputDims, int nbI return output; } -int SpecialSlice::enqueue( - int batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept +int32_t SpecialSlice::enqueue( + int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { specialSlice(stream, batch_size, mBboxesCnt, inputs[0], outputs[0]); @@ -190,7 +190,8 @@ int SpecialSlice::enqueue( } // Return the DataType of the plugin output at the requested index -DataType SpecialSlice::getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept +DataType SpecialSlice::getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t 
nbInputs) const noexcept { // Only 1 input and 1 output from the plugin layer PLUGIN_ASSERT(index == 0); @@ -201,21 +202,21 @@ DataType SpecialSlice::getOutputDataType(int index, nvinfer1::DataType const* in // Return true if output tensor is broadcast across a batch. bool SpecialSlice::isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. -bool SpecialSlice::canBroadcastInputAcrossBatch(int inputIndex) const noexcept +bool SpecialSlice::canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept { return false; } // Configure the layer with input and output data types. -void SpecialSlice::configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, +void SpecialSlice::configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept { PLUGIN_ASSERT(nbInputs == 1); diff --git a/plugin/specialSlicePlugin/specialSlicePlugin.h b/plugin/specialSlicePlugin/specialSlicePlugin.h index abd43107..5fbe2835 100644 --- a/plugin/specialSlicePlugin/specialSlicePlugin.h +++ b/plugin/specialSlicePlugin/specialSlicePlugin.h @@ -39,13 +39,13 @@ class SpecialSlice : public IPluginV2Ext ~SpecialSlice() override = default; - int getNbOutputs() const noexcept override; + int32_t getNbOutputs() const noexcept override; - void check_valid_inputs(nvinfer1::Dims const* inputs, int nbInputDims); + void check_valid_inputs(nvinfer1::Dims const* inputs, int32_t nbInputDims); - Dims getOutputDimensions(int index, Dims const* inputs, int nbInputDims) noexcept override; + Dims getOutputDimensions(int32_t index, Dims const* inputs, int32_t nbInputDims) noexcept override; - int initialize() noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override{}; @@ -54,9 +54,9 @@ class SpecialSlice : public IPluginV2Ext delete this; } - size_t getWorkspaceSize(int) const noexcept override; + size_t getWorkspaceSize(int32_t) const noexcept override; - int enqueue(int batch_size, void const* const* inputs, void* const* outputs, void* workspace, + int32_t enqueue(int32_t batch_size, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; size_t getSerializationSize() const noexcept override; @@ -75,24 +75,25 @@ class SpecialSlice : public IPluginV2Ext char const* getPluginNamespace() const noexcept override; - DataType getOutputDataType(int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; + DataType getOutputDataType( + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; bool isOutputBroadcastAcrossBatch( - int outputIndex, bool const* inputIsBroadcasted, int nbInputs) const noexcept override; + int32_t outputIndex, bool const* inputIsBroadcasted, int32_t nbInputs) const noexcept override; - bool canBroadcastInputAcrossBatch(int inputIndex) const noexcept override; + bool canBroadcastInputAcrossBatch(int32_t inputIndex) const noexcept override; void attachToContext( cudnnContext* cudnnContext, 
cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept override; - void configurePlugin(Dims const* inputDims, int nbInputs, Dims const* outputDims, int nbOutputs, + void configurePlugin(Dims const* inputDims, int32_t nbInputs, Dims const* outputDims, int32_t nbOutputs, DataType const* inputTypes, DataType const* outputTypes, bool const* inputIsBroadcast, - bool const* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) noexcept override; + bool const* outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize) noexcept override; void detachFromContext() noexcept override; private: - int mBboxesCnt; + int32_t mBboxesCnt; std::string mNameSpace; }; diff --git a/plugin/splitPlugin/split.cu b/plugin/splitPlugin/split.cu index a3389e5b..9cdf2af8 100644 --- a/plugin/splitPlugin/split.cu +++ b/plugin/splitPlugin/split.cu @@ -17,7 +17,8 @@ #include #include -#include "splitPlugin.h" + +#include "split.h" using namespace nvinfer1; using nvinfer1::plugin::SplitPlugin; @@ -73,6 +74,64 @@ void split_kernel(int nsegment, } } +bool SplitPlugin::supportsFormatCombination( + int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) noexcept +{ + PLUGIN_ASSERT(inOut && pos < (nbInputs + nbOutputs)); + return (inOut[pos].format == nvinfer1::PluginFormat::kLINEAR); +} + +nvinfer1::DataType SplitPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const + noexcept +{ + PLUGIN_ASSERT(inputTypes && nbInputs > 0); + return inputTypes[0]; +} + +int SplitPlugin::initialize() noexcept +{ + return 0; +} + +void SplitPlugin::terminate() noexcept +{ + +} + +void SplitPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, + const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) noexcept +{ + std::vector segment_offsets(1, 0); + for( int i = 0; i < nbOutputs; ++i ) + { + segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); + } + _d_segment_offsets = segment_offsets; + + for (int i = 0; i < nbInputs; i++) + { + for (int j = 0; j < in[0].desc.dims.nbDims; j++) + { + // Do not support dynamic dimensions + PLUGIN_ASSERT(in[0].desc.dims.d[j] != -1); + } + } + + nvinfer1::Dims dims = in[0].desc.dims; + _nx = 1; + for( int i = dims.nbDims-1; i > _axis; --i ) + { + _nx *= dims.d[i]; + } + _ny = dims.d[_axis]; + _nz = 1; + for( int i = _axis-1; i >= 0; --i ) + { + _nz *= dims.d[i]; + } + _d_output_ptrs.resize(nbOutputs, nullptr); +} + int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { @@ -109,3 +168,9 @@ int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvin return cudaGetLastError() != cudaSuccess; } +nvinfer1::DimsExprs SplitPlugin::getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept +{ + nvinfer1::DimsExprs output(inputs[0]); + output.d[_axis] = exprBuilder.constant(_output_lengths[outputIndex]); + return output; +} diff --git a/plugin/splitPlugin/split.h b/plugin/splitPlugin/split.h index 53e1cef9..76ff63b4 100644 --- a/plugin/splitPlugin/split.h +++ b/plugin/splitPlugin/split.h @@ -38,11 +38,11 @@ namespace plugin { class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt { - int _axis; - std::vector _output_lengths; - int _nx, _ny, _nz; - int _x_stride, _y_stride, _z_stride; - thrust::device_vector 
_d_segment_offsets; + int32_t _axis; + std::vector _output_lengths; + int32_t _nx, _ny, _nz; + int32_t _x_stride, _y_stride, _z_stride; + thrust::device_vector _d_segment_offsets; thrust::device_vector _d_output_ptrs; using IPluginV2::getOutputDimensions; @@ -67,13 +67,13 @@ class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt } public: - SplitPlugin(int axis, int* const& output_lengths, int noutput) + SplitPlugin(int32_t axis, int32_t* const& output_lengths, int32_t noutput) : _axis(axis) - , _output_lengths(std::vector(output_lengths, output_lengths + noutput)) + , _output_lengths(std::vector(output_lengths, output_lengths + noutput)) { PLUGIN_ASSERT(axis <= nvinfer1::Dims::MAX_DIMS); } - SplitPlugin(int axis, std::vector output_lengths) + SplitPlugin(int32_t axis, std::vector output_lengths) : _axis(axis) , _output_lengths(output_lengths) { @@ -85,16 +85,16 @@ class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt } bool supportsFormatCombination( - int pos, nvinfer1::PluginTensorDesc const* inOut, int nbInputs, int nbOutputs) noexcept override; + int32_t pos, nvinfer1::PluginTensorDesc const* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept override; nvinfer1::DataType getOutputDataType( - int index, nvinfer1::DataType const* inputTypes, int nbInputs) const noexcept override; - int initialize() noexcept override; + int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const noexcept override; + int32_t initialize() noexcept override; void terminate() noexcept override; - void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int nbInputs, - nvinfer1::DynamicPluginTensorDesc const* out, int nbOutputs) noexcept override; - int enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, + void configurePlugin(nvinfer1::DynamicPluginTensorDesc const* in, int32_t nbInputs, + nvinfer1::DynamicPluginTensorDesc const* out, int32_t nbOutputs) noexcept override; + int32_t enqueue(PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, nvinfer1::DimsExprs const* inputs, int nbInputs, + nvinfer1::DimsExprs getOutputDimensions(int32_t outputIndex, nvinfer1::DimsExprs const* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; nvinfer1::IPluginV2DynamicExt* clone() const noexcept override @@ -113,8 +113,8 @@ class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt { return SPLIT_PLUGIN_NAME; } - size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* /*inputs*/, int /*nbInputs*/, - nvinfer1::PluginTensorDesc const* /*outputs*/, int /*nbOutputs*/) const noexcept override + size_t getWorkspaceSize(nvinfer1::PluginTensorDesc const* /*inputs*/, int32_t /*nbInputs*/, + nvinfer1::PluginTensorDesc const* /*outputs*/, int32_t /*nbOutputs*/) const noexcept override { return 0; } @@ -123,7 +123,7 @@ class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt { return ""; } - int getNbOutputs() const noexcept override + int32_t getNbOutputs() const noexcept override { return _output_lengths.size(); } diff --git a/plugin/splitPlugin/splitPlugin.cpp b/plugin/splitPlugin/splitPlugin.cpp deleted file mode 100644 index 82a768af..00000000 --- a/plugin/splitPlugin/splitPlugin.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "splitPlugin.h" - -using namespace nvinfer1; -using nvinfer1::plugin::SplitPlugin; - -bool SplitPlugin::supportsFormatCombination( - int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) noexcept -{ - PLUGIN_ASSERT(inOut && pos < (nbInputs + nbOutputs)); - return (inOut[pos].format == nvinfer1::PluginFormat::kLINEAR); -} - -nvinfer1::DataType SplitPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const - noexcept -{ - PLUGIN_ASSERT(inputTypes && nbInputs > 0); - return inputTypes[0]; -} - -int SplitPlugin::initialize() noexcept -{ - return 0; -} - -void SplitPlugin::terminate() noexcept -{ - -} - -void SplitPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, - const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) noexcept -{ - std::vector segment_offsets(1, 0); - for( int i = 0; i < nbOutputs; ++i ) - { - segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); - } - _d_segment_offsets = segment_offsets; - - for (int i = 0; i < nbInputs; i++) - { - for (int j = 0; j < in[0].desc.dims.nbDims; j++) - { - // Do not support dynamic dimensions - PLUGIN_ASSERT(in[0].desc.dims.d[j] != -1); - } - } - - nvinfer1::Dims dims = in[0].desc.dims; - _nx = 1; - for( int i = dims.nbDims-1; i > _axis; --i ) - { - _nx *= dims.d[i]; - } - _ny = dims.d[_axis]; - _nz = 1; - for( int i = _axis-1; i >= 0; --i ) - { - _nz *= dims.d[i]; - } - //_d_output_ptrs.resize(nbOutputs, nullptr); -} - -nvinfer1::DimsExprs SplitPlugin::getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept -{ - nvinfer1::DimsExprs output(inputs[0]); - output.d[_axis] = exprBuilder.constant(_output_lengths[outputIndex]); - return output; -} diff --git a/plugin/splitPlugin/splitPlugin.h b/plugin/splitPlugin/splitPlugin.h deleted file mode 100644 index cc7af917..00000000 --- a/plugin/splitPlugin/splitPlugin.h +++ /dev/null @@ -1,187 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TRT_SPLIT_PLUGIN_H -#define TRT_SPLIT_PLUGIN_H -#include - -#include "common/checkMacrosPlugin.h" -#include "common/serialize.hpp" - -#include -#include -#include - -namespace -{ -constexpr const char* SPLIT_PLUGIN_VERSION{"1"}; -constexpr const char* SPLIT_PLUGIN_NAME{"Split"}; -} // namespace - -template -__global__ void split_kernel(int nsegment, - int const* __restrict__ segment_offsets, - T const* __restrict__ idata, - T* const* odatas, - int nx, - int src_ny, - int nz); - -namespace nvinfer1 -{ -namespace plugin -{ -class SplitPlugin final : public nvinfer1::IPluginV2DynamicExt -{ - int _axis; - std::vector _output_lengths; - int _nx, _ny, _nz; - int _x_stride, _y_stride, _z_stride; - thrust::device_vector _d_segment_offsets; - thrust::device_vector _d_output_ptrs; - -protected: - void deserialize(void const* serialData, size_t serialLength) noexcept - { - deserialize_value(&serialData, &serialLength, &_axis); - deserialize_value(&serialData, &serialLength, &_output_lengths); - } - size_t getSerializationSize() const noexcept override - { - return serialized_size(_axis) + serialized_size(_output_lengths); - } - void serialize(void* buffer) const noexcept override - { - serialize_value(&buffer, _axis); - serialize_value(&buffer, _output_lengths); - } - -public: - SplitPlugin(int axis, int* const& output_lengths, int noutput) - : _axis(axis) - , _output_lengths(std::vector(output_lengths, output_lengths + noutput)) - { - PLUGIN_ASSERT(axis <= nvinfer1::Dims::MAX_DIMS); - } - SplitPlugin(int axis, std::vector output_lengths) - : _axis(axis) - , _output_lengths(output_lengths) - { - PLUGIN_ASSERT(axis <= nvinfer1::Dims::MAX_DIMS); - } - SplitPlugin(void const* serialData, size_t serialLength) - { - this->deserialize(serialData, serialLength); - } - - bool supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) noexcept override; - nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept override; - int initialize() noexcept override; - void terminate() noexcept override; - void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, - const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) noexcept override; - int enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept override; - nvinfer1::DimsExprs getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override; - - nvinfer1::IPluginV2DynamicExt* clone() const noexcept override - { - return new SplitPlugin{_axis, _output_lengths}; - } - void destroy() noexcept override - { - delete this; - } - const char* getPluginVersion() const noexcept override - { - return SPLIT_PLUGIN_VERSION; - } - const char* getPluginType() const noexcept override - { - return SPLIT_PLUGIN_NAME; - } - size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* /*inputs*/, int /*nbInputs*/, - const nvinfer1::PluginTensorDesc* /*outputs*/, int /*nbOutputs*/) const noexcept override - { - return 0; - } - void setPluginNamespace(const char* /*pluginNamespace*/) noexcept override {} - const char* getPluginNamespace() const noexcept override - { - return ""; - } - int getNbOutputs() const noexcept override - { - return _output_lengths.size(); - } - void attachToContext( - cudnnContext* /*cudnn*/, cublasContext* /*cublas*/, 
nvinfer1::IGpuAllocator* /*allocator*/) noexcept override - { - } - void detachFromContext() noexcept override {} -}; - -class SplitPluginCreator : public nvinfer1::IPluginCreator -{ -public: - SplitPluginCreator() {} - - ~SplitPluginCreator() {} - - const char* getPluginName() const noexcept - { - return SPLIT_PLUGIN_NAME; - } - - const char* getPluginVersion() const noexcept - { - return SPLIT_PLUGIN_VERSION; - } - - const nvinfer1::PluginFieldCollection* getFieldNames() noexcept - { - std::cerr << "Function not implemented" << std::endl; - return nullptr; - } - - nvinfer1::IPluginV2DynamicExt* createPlugin(const char* /*name*/, const nvinfer1::PluginFieldCollection* /*fc*/) noexcept - { - std::cerr << "Function not implemented" << std::endl; - return nullptr; - } - - nvinfer1::IPluginV2DynamicExt* deserializePlugin(const char* /*name*/, const void* serialData, size_t serialLength) noexcept - { - return new SplitPlugin{serialData, serialLength}; - } - - void setPluginNamespace(const char* libNamespace) noexcept - { - mNamespace = libNamespace; - } - - const char* getPluginNamespace() const noexcept - { - return mNamespace.c_str(); - } - -private: - std::string mNamespace; -}; - -} // namespace plugin -} // namespace nvinfer1 -#endif // TRT_SPLIT_PLUGIN_H diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 580191f9..723c9318 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -40,7 +40,7 @@ set(CMAKE_CXX_STANDARD ${CPP_STANDARD}) if (NOT MSVC) # This allows us to use TRT libs shipped with standalone wheels. - set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN) + set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN:$ORIGIN/../${TENSORRT_MODULE}_libs) endif() # -------- PATHS -------- diff --git a/python/docstrings/infer/pyCoreDoc.h b/python/docstrings/infer/pyCoreDoc.h index 49580cf8..82878ac2 100644 --- a/python/docstrings/infer/pyCoreDoc.h +++ b/python/docstrings/infer/pyCoreDoc.h @@ -1141,9 +1141,10 @@ constexpr char const* descr = R"trtdoc( For example, to enable faster dynamic shapes, call :func:`set_preview_feature` with ``PreviewFeature.FASTER_DYNAMIC_SHAPES_0805`` )trtdoc"; constexpr char const* FASTER_DYNAMIC_SHAPES_0805 = R"trtdoc( - Optimize runtime dimensions with TensorRT's DL Compiler. + [DEPRECATED - will be removed in TensorRT 9.0] Optimize runtime dimensions with TensorRT's DL Compiler. Potentially reduces run time and decreases device memory usage and engine size. Models most likely to benefit from enabling ``FASTER_DYNAMIC_SHAPES_0805`` are transformer-based models, and models containing dynamic control flows. + The default value for this flag is on. Turning it off is deprecated. )trtdoc"; constexpr char const* DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805 = R"trtdoc( Disable usage of cuDNN/cuBLAS/cuBLASLt tactics in the TensorRT core library. diff --git a/python/docstrings/infer/pyGraphDoc.h b/python/docstrings/infer/pyGraphDoc.h index ff09d9aa..bc69b28e 100644 --- a/python/docstrings/infer/pyGraphDoc.h +++ b/python/docstrings/infer/pyGraphDoc.h @@ -98,7 +98,7 @@ constexpr const char* descr = R"trtdoc( constexpr const char* LINEAR = R"trtdoc( Row major linear format. - For a tensor with dimensions {N, C, H, W}, the W axis always has unit stride, and the stride of every other axis is at least the the product of of the next dimension times the next stride. the strides are the same as for a C array with dimensions [N][C][H][W]. 
+ For a tensor with dimensions {N, C, H, W}, the W axis always has unit stride, and the stride of every other axis is at least the product of the next dimension times the next stride. the strides are the same as for a C array with dimensions [N][C][H][W]. )trtdoc"; constexpr const char* CHW2 = R"trtdoc( @@ -232,14 +232,14 @@ constexpr const char* reset_dynamic_range = R"trtdoc( constexpr const char* set_dimension_name = R"trtdoc( Name a dimension of an input tensor. - + Associate a runtime dimension of an input tensor with a symbolic name. - Dimensions with the same non-empty name must be equal at runtime. + Dimensions with the same non-empty name must be equal at runtime. Knowing this equality for runtime dimensions may help the TensorRT optimizer. Both runtime and build-time dimensions can be named. If the function is called again, with the same index, it will overwrite the previous name. If None is passed as name, it will clear the name of the dimension. - + For example, setDimensionName(0, "n") associates the symbolic name "n" with the leading dimension. :arg index: index of the dimension. diff --git a/python/docstrings/parsers/pyCaffeDoc.h b/python/docstrings/parsers/pyCaffeDoc.h index 1f282941..9b2e1ebd 100644 --- a/python/docstrings/parsers/pyCaffeDoc.h +++ b/python/docstrings/parsers/pyCaffeDoc.h @@ -89,7 +89,7 @@ constexpr const char* is_plugin_v2 = R"trtdoc( :arg layer_name: Name of the layer which the user wishes to validate. - :returns: True if the the layer configuration is provided by an :class:`IPluginV2` . + :returns: True if the layer configuration is provided by an :class:`IPluginV2` . )trtdoc"; constexpr const char* create_plugin = R"trtdoc( diff --git a/python/docstrings/parsers/pyOnnxDoc.h b/python/docstrings/parsers/pyOnnxDoc.h index f0d6f0ea..65694747 100644 --- a/python/docstrings/parsers/pyOnnxDoc.h +++ b/python/docstrings/parsers/pyOnnxDoc.h @@ -134,10 +134,10 @@ namespace OnnxParserFlagDoc constexpr const char* descr = R"trtdoc( Flags that control how an ONNX model gets parsed. )trtdoc"; -constexpr const char* VERSION_COMPATIBLE = R"trtdoc( - Parse the ONNX model into the INetworkDefinition with the intention of building a version-compatible engine in TensorRT 8.6. - This flag is planned to be deprecated in TensorRT 8.7, and removed in TensorRT 9.0. - This will choose TensorRT's native InstanceNormalization implementation over the plugin implementation. +constexpr const char* NATIVE_INSTANCENORM = R"trtdoc( + Parse the ONNX model into the INetworkDefinition with the intention of using TensorRT's native layer implementation over the plugin implementation for InstanceNormalization nodes. + This flag is planned to be deprecated in TensorRT 8.7 and removed in TensorRT 9.0. + This flag is required when building version-compatible or hardware-compatible engines. There may be performance degradations when this flag is enabled. 
)trtdoc"; } // namespace OnnxParserFlagDoc diff --git a/python/packaging/LICENSE.txt b/python/packaging/bindings_wheel/LICENSE.txt similarity index 100% rename from python/packaging/LICENSE.txt rename to python/packaging/bindings_wheel/LICENSE.txt diff --git a/python/packaging/setup.cfg b/python/packaging/bindings_wheel/setup.cfg similarity index 100% rename from python/packaging/setup.cfg rename to python/packaging/bindings_wheel/setup.cfg diff --git a/python/packaging/bindings_wheel/setup.py b/python/packaging/bindings_wheel/setup.py new file mode 100644 index 00000000..b5b1b121 --- /dev/null +++ b/python/packaging/bindings_wheel/setup.py @@ -0,0 +1,50 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os + +from setuptools import setup + +tensorrt_module = "##TENSORRT_MODULE##" + +# This file expects the following to be passed from the environment when using standalone wheels: +# - STANDALONE: Whether we are building a standalone wheel +IS_STANDALONE = os.environ.get("STANDALONE") == "1" +if IS_STANDALONE: + tensorrt_module += "_bindings" + +setup( + name=tensorrt_module, + version="##TENSORRT_PYTHON_VERSION##", + description="A high performance deep learning inference library", + long_description="A high performance deep learning inference library", + author="NVIDIA Corporation", + license="Proprietary", + classifiers=[ + "License :: Other/Proprietary License", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + ], + packages=[tensorrt_module], + extras_require={"numpy": "numpy"}, + package_data={tensorrt_module: ["*.so*", "*.pyd", "*.pdb"]}, + include_package_data=True, + zip_safe=True, + keywords="nvidia tensorrt deeplearning inference", + url="https://developer.nvidia.com/tensorrt", + download_url="https://github.com/nvidia/tensorrt/tags", +) diff --git a/python/packaging/tensorrt/__init__.py b/python/packaging/bindings_wheel/tensorrt/__init__.py similarity index 86% rename from python/packaging/tensorrt/__init__.py rename to python/packaging/bindings_wheel/tensorrt/__init__.py index 0482c061..0192bc55 100644 --- a/python/packaging/tensorrt/__init__.py +++ b/python/packaging/bindings_wheel/tensorrt/__init__.py @@ -22,34 +22,28 @@ import warnings -def try_load(library): - try: - ctypes.CDLL(library) - except OSError: - pass - - -# Try loading all packaged libraries. This is a nop if there are no libraries packaged. -CURDIR = os.path.realpath(os.path.dirname(__file__)) -for lib in glob.iglob(os.path.join(CURDIR, "*.so*")): - try_load(lib) - - -# On Windows, we need to manually open the TensorRT libraries - otherwise we are unable to -# load the bindings. -def find_lib(name): - paths = os.environ["PATH"].split(os.path.pathsep) - for path in paths: - libpath = os.path.join(path, name) - if os.path.isfile(libpath): - return libpath - - raise FileNotFoundError( - "Could not find: {:}. 
Is it on your PATH?\nNote: Paths searched were:\n{:}".format(name, paths) - ) +# For standalone wheels, attempt to import the wheel containing the libraries. +try: + import ##TENSORRT_MODULE##_libs +except (ImportError, ModuleNotFoundError): + pass if sys.platform.startswith("win"): + # On Windows, we need to manually open the TensorRT libraries - otherwise we are unable to + # load the bindings. + def find_lib(name): + paths = os.environ["PATH"].split(os.path.pathsep) + for path in paths: + libpath = os.path.join(path, name) + if os.path.isfile(libpath): + return libpath + + raise FileNotFoundError( + "Could not find: {:}. Is it on your PATH?\nNote: Paths searched were:\n{:}".format(name, paths) + ) + + # Order matters here because of dependencies LIBRARIES = {"tensorrt": [ "nvinfer.dll", diff --git a/python/packaging/frontend_sdist/LICENSE.txt b/python/packaging/frontend_sdist/LICENSE.txt new file mode 100644 index 00000000..08f07f9f --- /dev/null +++ b/python/packaging/frontend_sdist/LICENSE.txt @@ -0,0 +1,180 @@ +Abstract +This document is the Software License Agreement (SLA) for NVIDIA TensorRT. This document contains specific license terms and conditions for NVIDIA TensorRT. By accepting this agreement, you agree to comply with all the terms and conditions applicable to the specific product(s) included herein. + +If you are receiving TensorRT under the NVIDIA Prerelease License Agreement (also known as NPLA) or under the NVIDIA Software License Agreement (previously known as the NVIDIA Tegra Software License Agreement), your use of TensorRT is governed by such applicable terms and conditions. All other uses of TensorRT are governed by the terms and conditions of the below license agreement. + +NVIDIA SOFTWARE LICENSE AGREEMENT +Important: READ BEFORE DOWNLOADING, INSTALLING, COPYING OR USING THE LICENSED SOFTWARE +This Software License Agreement ("SLA”), made and entered into as of the time and date of click through action (“Effective Date”),is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs the use of the NVIDIA computer software and the documentation made available for use with such NVIDIA software. By downloading, installing, copying, or otherwise using the NVIDIA software and/or documentation, you agree to be bound by the terms of this SLA. If you do not agree to the terms of this SLA, do not download, install, copy or use the NVIDIA software or documentation. IF YOU ARE ENTERING INTO THIS SLAON BEHALF OF A COMPANY OR OTHER LEGAL ENTITY, YOU REPRESENT THAT YOU HAVE THE LEGAL AUTHORITY TO BIND THE ENTITY TO THIS SLA, IN WHICH CASE “YOU” WILL MEAN THE ENTITY YOU REPRESENT. IF YOU DON’T HAVE SUCH AUTHORITY, OR IF YOU DON’T ACCEPT ALL THE TERMS AND CONDITIONS OF THIS SLA, THEN NVIDIA DOES NOT AGREETO LICENSE THE LICENSED SOFTWARETO YOU, AND YOU MAY NOT DOWNLOAD, INSTALL, COPY OR USE IT. + +Preface +This document is the Software License Agreement (SLA) for NVIDIA TensorRT. This document contains specific license terms and conditions for NVIDIA TensorRT. By accepting this agreement, you agree to comply with all the terms and conditions applicable to the specific product(s) included herein. + +If you are receiving TensorRT under the NVIDIA Prerelease License Agreement (also known as NPLA) or under the NVIDIA Software License Agreement (previously known as the NVIDIA Tegra Software License Agreement), your use of TensorRT is governed by such applicable terms and conditions. 
All other uses of TensorRT are governed by the terms and conditions of the below license agreement. + +NVIDIA SOFTWARE LICENSE AGREEMENT +Important: READ BEFORE DOWNLOADING, INSTALLING, COPYING OR USING THE LICENSED SOFTWARE +This Software License Agreement ("SLA”), made and entered into as of the time and date of click through action (“Effective Date”),is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs the use of the NVIDIA computer software and the documentation made available for use with such NVIDIA software. By downloading, installing, copying, or otherwise using the NVIDIA software and/or documentation, you agree to be bound by the terms of this SLA. If you do not agree to the terms of this SLA, do not download, install, copy or use the NVIDIA software or documentation. IF YOU ARE ENTERING INTO THIS SLAON BEHALF OF A COMPANY OR OTHER LEGAL ENTITY, YOU REPRESENT THAT YOU HAVE THE LEGAL AUTHORITY TO BIND THE ENTITY TO THIS SLA, IN WHICH CASE “YOU” WILL MEAN THE ENTITY YOU REPRESENT. IF YOU DON’T HAVE SUCH AUTHORITY, OR IF YOU DON’T ACCEPT ALL THE TERMS AND CONDITIONS OF THIS SLA, THEN NVIDIA DOES NOT AGREETO LICENSE THE LICENSED SOFTWARETO YOU, AND YOU MAY NOT DOWNLOAD, INSTALL, COPY OR USE IT. + +1. LICENSE. +1.1. License Grant +Subject to the terms of the AGREEMENT, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly set forth in a Supplement), during the applicable license term unless earlier terminated as provided below, to have Authorized Users install and use the Software, including modifications (if expressly permitted in a Supplement), in accordance with the Documentation. You are only licensed to activate and use Licensed Software for which you a have a valid license, even if during the download or installation you are presented with other product options. No Orders are binding on NVIDIA until accepted by NVIDIA. Your Orders are subject to the AGREEMENT. + +SLA Supplements: Certain Licensed Software licensed under this SLA may be subject to additional terms and conditions that will be presented to you in a Supplement for acceptance prior to the delivery of such Licensed Software under this SLA and the applicable Supplement. Licensed Software will only be delivered to you upon your acceptance of all applicable terms. + +1.2. Limited Purpose Licenses +If your license is provided for one of the purposes indicated below, then notwithstanding contrary terms in License Grant or in a Supplement, such licenses are for internal use and do not include any right or license to sub-license and distribute the Licensed Software or its output in any way in any public release, however limited, and/or in any manner that provides third parties with use of or access to the Licensed Software or its functionality or output, including (but not limited to) external alpha or beta testing or development phases. Further: +Evaluation License. You may use evaluation licenses solely for your internal evaluation of the Licensed Software for broader adoption within your Enterprise or in connection with a NVIDIA product purchase decision, and such licenses have an expiration date as indicated by NVIDIA in its sole discretion (or ninety days from the date of download if no other duration is indicated). +Educational/Academic License. You may use educational/academic licenses solely for educational purposes and all users must be enrolled or employed by an academic institution. 
If you do not meet NVIDIA’s academic program requirements for educational institutions, you have no rights under this license. +Test/Development License. You may use test/development licenses solely for your internal development, testing and/or debugging of your software applications or for interoperability testing with the Licensed Software, and such licenses have an expiration date as indicated by NVIDIA in its sole discretion (or one year from the date of download if no other duration is indicated). NVIDIA Confidential Information under the AGREEMENT includes output from Licensed Software developer tools identified as “Pro” versions, where the output reveals functionality or performance data pertinent to NVIDIA hardware or software products. +1.3. Pre-Release Licenses +With respect to alpha, beta, preview, and other pre-release Software and Documentation (“Pre-Release Licensed Software”) delivered to you under the AGREEMENT you acknowledge and agree that such Pre-Release Licensed Software (i) may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercially provided NVIDIA software and documentation, and (ii) use of such Pre-Release Licensed Software may result in unexpected results, loss of data, project delays or other unpredictable damage or loss. THEREFORE, PRE-RELEASE LICENSED SOFTWARE IS NOT INTENDED FOR USE, AND SHOULD NOT BE USED, IN PRODUCTION OR BUSINESS-CRITICAL SYSTEMS. NVIDIA has no obligation to make available a commercial version of any Pre-Release Licensed Software and NVIDIA has the right to abandon development of Pre-Release Licensed Software at any time without liability. + +1.4. Enterprise and Contractor Usage +You may allow your Enterprise employees and Contractors to access and use the Licensed Software pursuant to the terms of the AGREEMENT solely to perform work on your behalf, provided further that with respect to Contractors: (i) you obtain a written agreement from each Contractor which contains terms and obligations with respect to access to and use of Licensed Software no less protective of NVIDIA than those set forth in the AGREEMENT, and (ii) such Contractor’s access and use expressly excludes any sublicensing or distribution rights for the Licensed Software. You are responsible for the compliance with the terms and conditions of the AGREEMENT by your Enterprise and Contractors. Any act or omission that, if committed by you, would constitute a breach of the AGREEMENT shall be deemed to constitute a breach of the AGREEMENT if committed by your Enterprise or Contractors. + +1.5. Services +Except as expressly indicated in an Order, NVIDIA is under no obligation to provide support for the Licensed Software or to provide any patches, maintenance, updates or upgrades under the AGREEMENT. Unless patches, maintenance, updates or upgrades are provided with their separate governing terms and conditions, they constitute Licensed Software licensed to you under the AGREEMENT. + +2. LIMITATIONS. +2.1. 
License Restrictions +Except as expressly authorized in the AGREEMENT, you agree that you will not (nor authorize third parties to): (i) copy and use Software that was licensed to you for use in one or more NVIDIA hardware products in other unlicensed products (provided that copies solely for backup purposes are allowed); (ii) reverse engineer, decompile, disassemble (except to the extent applicable laws specifically require that such activities be permitted) or attempt to derive the source code, underlying ideas, algorithm or structure of Software provided to you in object code form; (iii) sell, transfer, assign, distribute, rent, loan, lease, sublicense or otherwise make available the Licensed Software or its functionality to third parties (a) as an application services provider or service bureau, (b) by operating hosted/virtual system environments, (c) by hosting, time sharing or providing any other type of services, or (d) otherwise by means of the internet; (iv) modify, translate or otherwise create any derivative works of any Licensed Software; (v) remove, alter, cover or obscure any proprietary notice that appears on or with the Licensed Software or any copies thereof; (vi) use the Licensed Software, or allow its use, transfer, transmission or export in violation of any applicable export control laws, rules or regulations; (vii) distribute, permit access to, or sublicense the Licensed Software as a stand-alone product; (viii) bypass, disable, circumvent or remove any form of copy protection, encryption, security or digital rights management or authentication mechanism used by NVIDIA in connection with the Licensed Software, or use the Licensed Software together with any authorization code, serial number, or other copy protection device not supplied by NVIDIA directly or through an authorized reseller; (ix) use the Licensed Software for the purpose of developing competing products or technologies or assisting a third party in such activities; (x) use the Licensed Software with any system or application where the use or failure of such system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss including, without limitation, use in connection with any nuclear, avionics, navigation, military, medical, life support or other life critical application (“Critical Applications”), unless the parties have entered into a Critical Applications agreement; (xi) distribute any modification or derivative work you make to the Licensed Software under or by reference to the same name as used by NVIDIA; or (xii) use the Licensed Software in any manner that would cause the Licensed Software to become subject to an Open Source License. Nothing in the AGREEMENT shall be construed to give you a right to use, or otherwise obtain access to, any source code from which the Software or any portion thereof is compiled or interpreted. You acknowledge that NVIDIA does not design, test, manufacture or certify the Licensed Software for use in the context of a Critical Application and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such use. 
You agree to defend, indemnify and hold harmless NVIDIA and its Affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to you and your Enterprise, and their respective employees, contractors, agents, distributors, resellers, end users, officers and directors use of Licensed Software outside of the scope of the AGREEMENT or any other breach of the terms of the AGREEMENT. + +2.2. Third Party License Obligations +You acknowledge and agree that the Licensed Software may include or incorporate third party technology (collectively “Third Party Components”), which is provided for use in or with the Software and not otherwise used separately. If the Licensed Software includes or incorporates Third Party Components, then the third-party pass-through terms and conditions (“Third Party Terms”) for the particular Third Party Component will be bundled with the Software or otherwise made available online as indicated by NVIDIA and will be incorporated by reference into the AGREEMENT. In the event of any conflict between the terms in the AGREEMENT and the Third Party Terms, the Third Party Terms shall govern. Copyright to Third Party Components are held by the copyright holders indicated in the copyright notices indicated in the Third Party Terms. + +Audio/Video Encoders and Decoders: You acknowledge and agree that it is your sole responsibility to obtain any additional third party licenses required to make, have made, use, have used, sell, import, and offer for sale your products or services that include or incorporate any Third Party Components and content relating to audio and/or video encoders and decoders from, including but not limited to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., MPEG-LA, and Coding Technologies as NVIDIA does not grant to you under the AGREEMENT any necessary patent or other rights with respect to audio and/or video encoders and decoders. + +2.3. Limited Rights +Your rights in the Licensed Software are limited to those expressly granted under the AGREEMENT and no other licenses are granted whether by implication, estoppel or otherwise. NVIDIA reserves all rights, title and interest in and to the Licensed Software not expressly granted under the AGREEMENT. + +3. CONFIDENTIALITY +Neither party will use the other party’s Confidential Information, except as necessary for the performance of the AGREEMENT, nor will either party disclose such Confidential Information to any third party, except to personnel of NVIDIA and its Affiliates, you, your Enterprise, your Enterprise Contractors, and each party’s legal and financial advisors that have a need to know such Confidential Information for the performance of the AGREEMENT, provided that each such personnel, employee and Contractor is subject to a written agreement that includes confidentiality obligations consistent with those set forth herein. Each party will use all reasonable efforts to maintain the confidentiality of all of the other party’s Confidential Information in its possession or control, but in no event less than the efforts that it ordinarily uses with respect to its own Confidential Information of similar nature and importance. 
The foregoing obligations will not restrict either party from disclosing the other party’s Confidential Information or the terms and conditions of the AGREEMENT as required under applicable securities regulations or pursuant to the order or requirement of a court, administrative agency, or other governmental body, provided that the party required to make such disclosure (i) gives reasonable notice to the other party to enable it to contest such order or requirement prior to its disclosure (whether through protective orders or otherwise), (ii) uses reasonable effort to obtain confidential treatment or similar protection to the fullest extent possible to avoid such public disclosure, and (iii) discloses only the minimum amount of information necessary to comply with such requirements. + +4. OWNERSHIP +You are not obligated to disclose to NVIDIA any modifications that you, your Enterprise or your Contractors make to the Licensed Software as permitted under the AGREEMENT. As between the parties, all modifications are owned by NVIDIA and licensed to you under the AGREEMENT unless otherwise expressly provided in a Supplement. The Licensed Software and all modifications owned by NVIDIA, and the respective Intellectual Property Rights therein, are and will remain the sole and exclusive property of NVIDIA or its licensors, whether the Licensed Software is separate from or combined with any other products or materials. You shall not engage in any act or omission that would impair NVIDIA’s and/or its licensors’ Intellectual Property Rights in the Licensed Software or any other materials, information, processes or subject matter proprietary to NVIDIA. NVIDIA’s licensors are intended third party beneficiaries with the right to enforce provisions of the AGREEMENT with respect to their Confidential Information and/or Intellectual Property Rights. + +5. FEEDBACK +You have no obligation to provide Feedback to NVIDIA. However, NVIDIA and/or its Affiliates may use and include any Feedback that you provide to improve the Licensed Software or other NVIDIA products, technologies or materials. Accordingly, if you provide Feedback, you agree that NVIDIA and/or its Affiliates, at their option, may, and may permit their licensees, to make, have made, use, have used, reproduce, license, distribute and otherwise commercialize the Feedback in the Licensed Software or in other NVIDIA products, technologies or materials without the payment of any royalties or fees to you. All Feedback becomes the sole property of NVIDIA and may be used in any manner NVIDIA sees fit, and you hereby assign to NVIDIA all of your right, title and interest in and to any Feedback. NVIDIA has no obligation to respond to Feedback or to incorporate Feedback into the Licensed Software. + +6. NO WARRANTIES +THE LICENSED SOFTWARE AND ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES ARE PROVIDED BY NVIDIA “AS IS” AND “WITH ALL FAULTS,” AND NVIDIA EXPRESSLY DISCLAIMS ALL OTHER WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF OPERABILITY, CONDITION, VALUE, ACCURACY OF DATA, OR QUALITY, AS WELL AS ANY WARRANTIES OF MERCHANTABILITY, SYSTEM INTEGRATION, WORKMANSHIP, SUITABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE BY NVIDIA ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE. 
NVIDIA DOES NOT WARRANT THAT THE LICENSED SOFTWARE OR ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES PROVIDED BY NVIDIA UNDER THE AGREEMENT WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. YOU ACKNOWLEDGE THAT NVIDIA’S OBLIGATIONS UNDER THE AGREEMENT ARE FOR THE BENEFIT OF YOU ONLY. Nothing in this warranty section affects any statutory rights of consumers or other recipients to the extent that they cannot be waived or limited by contract under applicable law. + +7. LIMITATION OF LIABILITY +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA OR ITS LICENSORS SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THE AGREEMENT OR THE USE OR PERFORMANCE OF THE LICENSED SOFTWARE AND ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES PROVIDED BY NVIDIA UNDER THE AGREEMENT, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA’S TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THE AGREEMENT EXCEED THE NET AMOUNTS RECEIVED BY NVIDIA FOR YOUR USE OF THE PARTICULAR LICENSED SOFTWARE DURING THE TWELVE (12) MONTHS BEFORE THE LIABILITY AROSE (or up to US$10.00 if you acquired the Licensed Software for no charge). THE NATURE OF THE LIABILITY, THE NUMBER OF CLAIMS OR SUITS OR THE NUMBER OF PARTIES WITHIN YOUR ENTERPRISE THAT ACCEPTED THE TERMS OF THE AGREEMENT SHALL NOT ENLARGE OR EXTEND THIS LIMIT. THE FOREGOING LIMITATIONS SHALL APPLY REGARDLESS OF WHETHER NVIDIA OR ITS LICENSORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND REGARDLESS OF WHETHER ANY REMEDY FAILS ITS ESSENTIAL PURPOSE. The disclaimers, exclusions and limitations of liability set forth in the AGREEMENT form an essential basis of the bargain between the parties, and, absent any such disclaimers, exclusions or limitations of liability, the provisions of the AGREEMENT, including, without limitation, the economic terms, would be substantially different. + +8. TERM AND TERMINATION. +8.1. AGREEMENT, Licenses and Services +This SLA shall become effective upon the Effective Date, each Supplement upon their acceptance, and both this SLA and Supplements shall continue in effect until your last access or use of the Licensed Software and/or services hereunder, unless earlier terminated as provided in this “Term and Termination” section. Each Licensed Software license ends at the earlier of (a) the expiration of the applicable license term, or (b) termination of such license or the AGREEMENT. Each service ends at the earlier of (x) the expiration of the applicable service term, (y) termination of such service or the AGREEMENT, or (z) expiration or termination of the associated license and no credit or refund will be provided upon the expiration or termination of the associated license for any service fees paid. + +8.2. 
Termination and Effect of Expiration or Termination +NVIDIA may terminate the AGREEMENT in whole or in part: (i) if you breach any term of the AGREEMENT and fail to cure such breach within thirty (30) days following notice thereof from NVIDIA (or immediately if you violate NVIDIA’s Intellectual Property Rights); (ii) if you become the subject of a voluntary or involuntary petition in bankruptcy or any proceeding relating to insolvency, receivership, liquidation or composition for the benefit of creditors, if that petition or proceeding is not dismissed with prejudice within sixty (60) days after filing, or if you cease to do business; or (iii) if you commence or participate in any legal proceeding against NVIDIA, with respect to the Licensed Software that is the subject of the proceeding during the pendency of such legal proceeding. If you or your authorized NVIDIA reseller fail to pay license fees or service fees when due then NVIDIA may, in its sole discretion, suspend or terminate your license grants, services and any other rights provided under the AGREEMENT for the affected Licensed Software, in addition to any other remedies NVIDIA may have at law or equity. Upon any expiration or termination of the AGREEMENT, a license or a service provided hereunder, (a) any amounts owed to NVIDIA become immediately due and payable, (b) you must promptly discontinue use of the affected Licensed Software and/or service, and (c) you must promptly destroy or return to NVIDIA all copies of the affected Licensed Software and all portions thereof in your possession or control, and each party will promptly destroy or return to the other all of the other party’s Confidential Information within its possession or control. Upon written request, you will certify in writing that you have complied with your obligations under this section. Upon expiration or termination of the AGREEMENT all provisions survive except for the license grant provisions. + +9. CONSENT TO COLLECTION AND USE OF INFORMATION. +You hereby agree and acknowledge that the Software may access, collect non-personally identifiable information about your Enterprise computer systems in order to properly optimize such systems for use with the Software. To the extent that you use the Software, you hereby consent to all of the foregoing, and represent and warrant that you have the right to grant such consent. In addition, you agree that you are solely responsible for maintaining appropriate data backups and system restore points for your Enterprise systems, and that NVIDIA will have no responsibility for any damage or loss to such systems (including loss of data or access) arising from or relating to (a) any changes to the configuration, application settings, environment variables, registry, drivers, BIOS, or other attributes of the systems (or any part of such systems) initiated through the Software; or (b) installation of any Software or third party software patches initiated through the Software. In certain systems you may change your system update preferences by unchecking "Automatically check for updates" in the "Preferences" tab of the control panel for the Software. + +In connection with the receipt of the Licensed Software or services you may receive access to links to third party websites and services and the availability of those links does not imply any endorsement by NVIDIA. 
NVIDIA encourages you to review the privacy statements on those sites and services that you choose to visit so that you can understand how they may collect, use and share personal information of individuals. NVIDIA is not responsible or liable for: (i) the availability or accuracy of such links; or (ii) the products, services or information available on or through such links; or (iii) the privacy statements or practices of sites and services controlled by other companies or organizations. + +To the extent that you or members of your Enterprise provide to NVIDIA during registration or otherwise personal information, you acknowledge that such information will be collected, used and disclosed by NVIDIA in accordance with NVIDIA's privacy policy, available at URL http://www.nvidia.com/object/privacy_policy.html. + +10. GENERAL. +This SLA, any Supplements incorporated hereto, and Orders constitute the entire agreement of the parties with respect to the subject matter hereto and supersede all prior negotiations, conversations, or discussions between the parties relating to the subject matter hereto, oral or written, and all past dealings or industry custom. Any additional and/or conflicting terms and conditions on purchase order(s) or any other documents issued by you are null, void, and invalid. Any amendment or waiver under the AGREEMENT must be in writing and signed by representatives of both parties. + +The AGREEMENT and the rights and obligations thereunder may not be assigned by you, in whole or in part, including by merger, consolidation, dissolution, operation of law, or any other manner, without written consent of NVIDIA, and any purported assignment in violation of this provision shall be void and of no effect. NVIDIA may assign, delegate or transfer the AGREEMENT and its rights and obligations hereunder, and if to a non-Affiliate you will be notified. + +Each party acknowledges and agrees that the other is an independent contractor in the performance of the AGREEMENT, and each party is solely responsible for all of its employees, agents, contractors, and labor costs and expenses arising in connection therewith. The parties are not partners, joint ventures or otherwise affiliated, and neither has any authority to make any statements, representations or commitments of any kind to bind the other party without prior written consent. + +Neither party will be responsible for any failure or delay in its performance under the AGREEMENT (except for any payment obligations) to the extent due to causes beyond its reasonable control for so long as such force majeure event continues in effect. + +The AGREEMENT will be governed by and construed under the laws of the State of Delaware and the United States without regard to the conflicts of law provisions thereof and without regard to the United Nations Convention on Contracts for the International Sale of Goods. The parties consent to the personal jurisdiction of the federal and state courts located in Santa Clara County, California. You acknowledge and agree that a breach of any of your promises or agreements contained in the AGREEMENT may result in irreparable and continuing injury to NVIDIA for which monetary damages may not be an adequate remedy and therefore NVIDIA is entitled to seek injunctive relief as well as such other and further relief as may be appropriate. 
If any court of competent jurisdiction determines that any provision of the AGREEMENT is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative. + +The Licensed Software has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions set forth in the AGREEMENT pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (c)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2701 San Tomas Expressway, Santa Clara, CA 95050. + +You acknowledge that the Licensed Software described under the AGREEMENT is subject to export control under the U.S. Export Administration Regulations (EAR) and economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC). Therefore, you may not export, reexport or transfer in-country the Licensed Software without first obtaining any license or other approval that may be required by BIS and/or OFAC. You are responsible for any violation of the U.S. or other applicable export control or economic sanctions laws, regulations and requirements related to the Licensed Software. By accepting this SLA, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the Licensed Software. + +Any notice delivered by NVIDIA to you under the AGREEMENT will be delivered via mail, email or fax. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2701 San Tomas Expressway, Santa Clara, California 95050, United States of America, Attention: Legal Department. + +11. GLOSSARY OF TERMS +Certain capitalized terms, if not otherwise defined elsewhere in this SLA, shall have the meanings set forth below: +“Affiliate” +“Affiliate” means any legal entity that Owns, is Owned by, or is commonly Owned with a party. “Own” means having more than 50% ownership or the right to direct the management of the entity. +“AGREEMENT” +“AGREEMENT” means this SLA and all associated Supplements entered by the parties referencing this SLA. +“Authorized Users” +“Authorized Users” means your Enterprise individual employees and any of your Enterprise’s Contractors, subject to the terms of the “Enterprise and Contractors Usage” section. +“Confidential Information” +“Confidential Information” means the Licensed Software (unless made publicly available by NVIDIA without confidentiality obligations), and any NVIDIA business, marketing, pricing, research and development, know-how, technical, scientific, financial status, proposed new products or other information disclosed by NVIDIA to you which, at the time of disclosure, is designated in writing as confidential or proprietary (or like written designation), or orally identified as confidential or proprietary or is otherwise reasonably identifiable by parties exercising reasonable business judgment, as confidential. 
Confidential Information does not and will not include information that: (i) is or becomes generally known to the public through no fault of or breach of the AGREEMENT by the receiving party; (ii) is rightfully known by the receiving party at the time of disclosure without an obligation of confidentiality; (iii) is independently developed by the receiving party without use of the disclosing party’s Confidential Information; or (iv) is rightfully obtained by the receiving party from a third party without restriction on use or disclosure. +“Contractor” +“Contractor” means an individual who works primarily for your Enterprise on a contractor basis from your secure network. +“Documentation” +“Documentation” means the NVIDIA documentation made available for use with the Software, including (without limitation) user manuals, datasheets, operations instructions, installation guides, release notes and other materials provided to you under the AGREEMENT. +“Enterprise” +“Enterprise” means you or any company or legal entity for which you accepted the terms of this SLA, and their subsidiaries of which your company or legal entity owns more than fifty percent (50%) of the issued and outstanding equity. +“Feedback” +“Feedback” means any and all suggestions, feature requests, comments or other feedback regarding the Licensed Software, including possible enhancements or modifications thereto. +“Intellectual Property Rights” +“Intellectual Property Rights” means all patent, copyright, trademark, trade secret, trade dress, trade names, utility models, mask work, moral rights, rights of attribution or integrity service marks, master recording and music publishing rights, performance rights, author’s rights, database rights, registered design rights and any applications for the protection or registration of these rights, or other intellectual or industrial property rights or proprietary rights, howsoever arising and in whatever media, whether now known or hereafter devised, whether or not registered, (including all claims and causes of action for infringement, misappropriation or violation and all rights in any registrations and renewals), worldwide and whether existing now or in the future. +“Licensed Software” +“Licensed Software” means Software, Documentation and all modifications owned by NVIDIA. +“Open Source License” +“Open Source License” includes, without limitation, a software license that requires as a condition of use, modification, and/or distribution of such software that the Software be (i) disclosed or distributed in source code form; (ii) be licensed for the purpose of making derivative works; or (iii) be redistributable at no charge. +“Order” +“Order” means a purchase order issued by you, a signed purchase agreement with you, or other ordering document issued by you to NVIDIA or a NVIDIA authorized reseller (including any on-line acceptance process) that references and incorporates the AGREEMENT and is accepted by NVIDIA. +“Software” +“Software” means the NVIDIA software programs licensed to you under the AGREEMENT including, without limitation, libraries, sample code, utility programs and programming code. +“Supplement” +“Supplement” means the additional terms and conditions beyond those stated in this SLA that apply to certain Licensed Software licensed hereunder. +12.
TensorRT SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT +The terms set forth in this TensorRT Supplement (“Supplement”) govern your use of the NVIDIA GPU inference engine (the “TensorRT Licensed Software”) under the terms of your software license agreement (“SLA”) as modified by this Supplement. This Supplement is an exhibit to the SLA and is hereby incorporated as an integral part thereto. Capitalized terms used but not defined herein shall have the meaning assigned to them in the SLA. In the event of conflict between the terms in this Supplement and the terms in the SLA, this Supplement shall control. + +12.1. TensorRT DISTRIBUTION +Subject to the terms of the SLA and this Supplement, NVIDIA hereby grants you a non-exclusive, nontransferable license during the applicable license term unless earlier terminated pursuant to the SLA, to distribute the libnvinfer, libnvinfer_plugin, and libnvparsers libraries when delivered to you as part of the TensorRT Licensed Software in source code form or binary form (but not when provided to you as part of a hardware product), subject to the following: such distribution is solely in binary form to your licensees (“Customers”) only as a component of your own software products having additional material functionality beyond the TensorRT Licensed Software (each, a “Licensee Application"). Subject to the terms and conditions of the SLA and this Supplement, you may further authorize Customers to redistribute the libnvinfer, libnvinfer_plugin, and libnvparsers libraries as incorporated into a Licensee Application, solely in binary form, provided, however, that you shall require in your agreements with your Customers that their distributions be on terms at least as restrictive as those applicable for your use of such TensorRT Licensed Software within a Licensee Application. The expiration or termination of your licenses to the above described TensorRT Licensed Software under the SLA and this Supplement will not affect rights previously granted by you to recipients that were in compliance with the SLA and this Supplement. + +In addition to the rights above, for parties that are developing software intended solely for use on Jetson development kits or Jetson modules and running Linux for Tegra software the following shall apply: TensorRT Licensed Software licensed hereunder may be distributed in its entirety, as provided by NVIDIA and without separation of its components, for you and/or your licensees to create software development kits for use only on the Jetson platform and running Linux for Tegra software. You shall require in your agreements with your licensees that their distributions be on terms at least as restrictive as those applicable for your distribution of TensorRT Licensed Software as described in this Section 1.
+ +12.2. LICENSE DURATION +Each TensorRT Licensed Software is licensed to you for an initial duration of one year starting from the date of delivery or download. The licenses granted will automatically renew for successive one year periods, provided that NVIDIA reserves the right to terminate licenses upon ninety (90) days written notice to you prior to the commencement of a renewal year in addition to the termination rights set forth in the SLA. + +12.3. EXPIRATION OR TERMINATION OF THIS SUPPLEMENT +Your failure to comply with the terms of this Supplement is ground for termination for breach by NVIDIA under the SLA. This Supplement will automatically expire or terminate upon the expiration or termination of your rights to TensorRT Licensed Software under the SLA or this Supplement. + +Notices +Notice +This document is provided for information purposes only and shall not be regarded as a warranty of a certain functionality, condition, or quality of a product. NVIDIA Corporation (“NVIDIA”) makes no representations or warranties, expressed or implied, as to the accuracy or completeness of the information contained in this document and assumes no responsibility for any errors contained herein. NVIDIA shall have no liability for the consequences or use of such information or for any infringement of patents or other rights of third parties that may result from its use. This document is not a commitment to develop, release, or deliver any Material (defined below), code, or functionality. + +NVIDIA reserves the right to make corrections, modifications, enhancements, improvements, and any other changes to this document, at any time without notice. + +Customer should obtain the latest relevant information before placing orders and should verify that such information is current and complete. + +NVIDIA products are sold subject to the NVIDIA standard terms and conditions of sale supplied at the time of order acknowledgement, unless otherwise agreed in an individual sales agreement signed by authorized representatives of NVIDIA and customer (“Terms of Sale”). NVIDIA hereby expressly objects to applying any customer general terms and conditions with regards to the purchase of the NVIDIA product referenced in this document. No contractual obligations are formed either directly or indirectly by this document. + +NVIDIA products are not designed, authorized, or warranted to be suitable for use in medical, military, aircraft, space, or life support equipment, nor in applications where failure or malfunction of the NVIDIA product can reasonably be expected to result in personal injury, death, or property or environmental damage. NVIDIA accepts no liability for inclusion and/or use of NVIDIA products in such equipment or applications and therefore such inclusion and/or use is at customer’s own risk. + +NVIDIA makes no representation or warranty that products based on this document will be suitable for any specified use. Testing of all parameters of each product is not necessarily performed by NVIDIA.
It is customer’s sole responsibility to evaluate and determine the applicability of any information contained in this document, ensure the product is suitable and fit for the application planned by customer, and perform the necessary testing for the application in order to avoid a default of the application or the product. Weaknesses in customer’s product designs may affect the quality and reliability of the NVIDIA product and may result in additional or different conditions and/or requirements beyond those contained in this document. NVIDIA accepts no liability related to any default, damage, costs, or problem which may be based on or attributable to: (i) the use of the NVIDIA product in any manner that is contrary to this document or (ii) customer product designs. + +No license, either expressed or implied, is granted under any NVIDIA patent right, copyright, or other NVIDIA intellectual property right under this document. Information published by NVIDIA regarding third-party products or services does not constitute a license from NVIDIA to use such products or services or a warranty or endorsement thereof. Use of such information may require a license from a third party under the patents or other intellectual property rights of the third party, or a license from NVIDIA under the patents or other intellectual property rights of NVIDIA. + +Reproduction of information in this document is permissible only if approved in advance by NVIDIA in writing, reproduced without alteration and in full compliance with all applicable export laws and regulations, and accompanied by all associated conditions, limitations, and notices. + +THIS DOCUMENT AND ALL NVIDIA DESIGN SPECIFICATIONS, REFERENCE BOARDS, FILES, DRAWINGS, DIAGNOSTICS, LISTS, AND OTHER DOCUMENTS (TOGETHER AND SEPARATELY, “MATERIALS”) ARE BEING PROVIDED “AS IS.” NVIDIA MAKES NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. TO THE EXTENT NOT PROHIBITED BY LAW, IN NO EVENT WILL NVIDIA BE LIABLE FOR ANY DAMAGES, INCLUDING WITHOUT LIMITATION ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF ANY USE OF THIS DOCUMENT, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. Notwithstanding any damages that customer might incur for any reason whatsoever, NVIDIA’s aggregate and cumulative liability towards customer for the products described herein shall be limited in accordance with the Terms of Sale for the product. + +VESA DisplayPort +DisplayPort and DisplayPort Compliance Logo, DisplayPort Compliance Logo for Dual-mode Sources, and DisplayPort Compliance Logo for Active Cables are trademarks owned by the Video Electronics Standards Association in the United States and other countries. + +HDMI +HDMI, the HDMI logo, and High-Definition Multimedia Interface are trademarks or registered trademarks of HDMI Licensing LLC. + +ARM +ARM, AMBA and ARM Powered are registered trademarks of ARM Limited. Cortex, MPCore and Mali are trademarks of ARM Limited. All other brands or product names are the property of their respective holders. "ARM" is used to represent ARM Holdings plc; its operating company ARM Limited; and the regional subsidiaries ARM Inc.; ARM KK; ARM Korea Limited.; ARM Taiwan Limited; ARM France SAS; ARM Consulting (Shanghai) Co. 
Ltd.; ARM Germany GmbH; ARM Embedded Technologies Pvt. Ltd.; ARM Norway, AS and ARM Sweden AB. + +OpenCL +OpenCL is a trademark of Apple Inc. used under license to the Khronos Group Inc. + +Trademarks +NVIDIA, the NVIDIA logo, and cuBLAS, CUDA, CUDA Toolkit, cuDNN, DALI, DIGITS, DGX, DGX-1, DGX-2, DGX Station, DLProf, GPU, JetPack, Jetson, Kepler, Maxwell, NCCL, Nsight Compute, Nsight Systems, NVCaffe, NVIDIA Ampere GPU architecture, NVIDIA Deep Learning SDK, NVIDIA Developer Program, NVIDIA GPU Cloud, NVLink, NVSHMEM, PerfWorks, Pascal, SDK Manager, T4, Tegra, TensorRT, TensorRT Inference Server, Tesla, TF-TRT, Triton Inference Server, Turing, and Volta are trademarks and/or registered trademarks of NVIDIA Corporation in the United States and other countries. Other company and product names may be trademarks of the respective companies with which they are associated. + +Copyright +© 2021 NVIDIA Corporation. All rights reserved. diff --git a/python/packaging/frontend_sdist/setup.cfg b/python/packaging/frontend_sdist/setup.cfg new file mode 100644 index 00000000..32a8c1c0 --- /dev/null +++ b/python/packaging/frontend_sdist/setup.cfg @@ -0,0 +1,5 @@ +[metadata] +license_files = LICENSE.txt + +[bdist_wheel] +universal = 1 diff --git a/python/packaging/frontend_sdist/setup.py b/python/packaging/frontend_sdist/setup.py new file mode 100644 index 00000000..7d647ffb --- /dev/null +++ b/python/packaging/frontend_sdist/setup.py @@ -0,0 +1,70 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import sys + +from setuptools import setup +from setuptools.command.install import install +import subprocess as sp + +tensorrt_module = "##TENSORRT_MODULE##" + + +class InstallCommand(install): + def run(self): + def install_dep(package_name): + status = sp.run( + [ + sys.executable, + "-m", + "pip", + "install", + "{:}==##TENSORRT_PYTHON_VERSION##".format(package_name), + "--index-url", + "https://pypi.nvidia.com", + ] + ) + status.check_returncode() + + install_dep("{:}_libs".format(tensorrt_module)) + install_dep("{:}_bindings".format(tensorrt_module)) + + install.run(self) + + +setup( + name=tensorrt_module, + version="##TENSORRT_PYTHON_VERSION##", + description="A high performance deep learning inference library", + long_description="A high performance deep learning inference library", + author="NVIDIA Corporation", + license="Proprietary", + classifiers=[ + "License :: Other/Proprietary License", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + ], + packages=[tensorrt_module], + extras_require={"numpy": "numpy"}, + package_data={tensorrt_module: ["*.so*", "*.pyd", "*.pdb"]}, + include_package_data=True, + zip_safe=True, + keywords="nvidia tensorrt deeplearning inference", + url="https://developer.nvidia.com/tensorrt", + download_url="https://github.com/nvidia/tensorrt/tags", + cmdclass={"install": InstallCommand}, +) diff --git a/python/packaging/frontend_sdist/tensorrt/__init__.py b/python/packaging/frontend_sdist/tensorrt/__init__.py new file mode 100644 index 00000000..ee379126 --- /dev/null +++ b/python/packaging/frontend_sdist/tensorrt/__init__.py @@ -0,0 +1,18 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from tensorrt_bindings import * diff --git a/python/packaging/libs_wheel/LICENSE.txt b/python/packaging/libs_wheel/LICENSE.txt new file mode 100644 index 00000000..08f07f9f --- /dev/null +++ b/python/packaging/libs_wheel/LICENSE.txt @@ -0,0 +1,180 @@ +Abstract +This document is the Software License Agreement (SLA) for NVIDIA TensorRT. This document contains specific license terms and conditions for NVIDIA TensorRT. By accepting this agreement, you agree to comply with all the terms and conditions applicable to the specific product(s) included herein. + +If you are receiving TensorRT under the NVIDIA Prerelease License Agreement (also known as NPLA) or under the NVIDIA Software License Agreement (previously known as the NVIDIA Tegra Software License Agreement), your use of TensorRT is governed by such applicable terms and conditions. All other uses of TensorRT are governed by the terms and conditions of the below license agreement. 
+ +NVIDIA SOFTWARE LICENSE AGREEMENT +Important: READ BEFORE DOWNLOADING, INSTALLING, COPYING OR USING THE LICENSED SOFTWARE +This Software License Agreement (“SLA”), made and entered into as of the time and date of click through action (“Effective Date”), is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs the use of the NVIDIA computer software and the documentation made available for use with such NVIDIA software. By downloading, installing, copying, or otherwise using the NVIDIA software and/or documentation, you agree to be bound by the terms of this SLA. If you do not agree to the terms of this SLA, do not download, install, copy or use the NVIDIA software or documentation. IF YOU ARE ENTERING INTO THIS SLA ON BEHALF OF A COMPANY OR OTHER LEGAL ENTITY, YOU REPRESENT THAT YOU HAVE THE LEGAL AUTHORITY TO BIND THE ENTITY TO THIS SLA, IN WHICH CASE “YOU” WILL MEAN THE ENTITY YOU REPRESENT. IF YOU DON’T HAVE SUCH AUTHORITY, OR IF YOU DON’T ACCEPT ALL THE TERMS AND CONDITIONS OF THIS SLA, THEN NVIDIA DOES NOT AGREE TO LICENSE THE LICENSED SOFTWARE TO YOU, AND YOU MAY NOT DOWNLOAD, INSTALL, COPY OR USE IT. + +Preface +This document is the Software License Agreement (SLA) for NVIDIA TensorRT. This document contains specific license terms and conditions for NVIDIA TensorRT. By accepting this agreement, you agree to comply with all the terms and conditions applicable to the specific product(s) included herein. + +If you are receiving TensorRT under the NVIDIA Prerelease License Agreement (also known as NPLA) or under the NVIDIA Software License Agreement (previously known as the NVIDIA Tegra Software License Agreement), your use of TensorRT is governed by such applicable terms and conditions. All other uses of TensorRT are governed by the terms and conditions of the below license agreement. + +NVIDIA SOFTWARE LICENSE AGREEMENT +Important: READ BEFORE DOWNLOADING, INSTALLING, COPYING OR USING THE LICENSED SOFTWARE +This Software License Agreement (“SLA”), made and entered into as of the time and date of click through action (“Effective Date”), is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs the use of the NVIDIA computer software and the documentation made available for use with such NVIDIA software. By downloading, installing, copying, or otherwise using the NVIDIA software and/or documentation, you agree to be bound by the terms of this SLA. If you do not agree to the terms of this SLA, do not download, install, copy or use the NVIDIA software or documentation. IF YOU ARE ENTERING INTO THIS SLA ON BEHALF OF A COMPANY OR OTHER LEGAL ENTITY, YOU REPRESENT THAT YOU HAVE THE LEGAL AUTHORITY TO BIND THE ENTITY TO THIS SLA, IN WHICH CASE “YOU” WILL MEAN THE ENTITY YOU REPRESENT. IF YOU DON’T HAVE SUCH AUTHORITY, OR IF YOU DON’T ACCEPT ALL THE TERMS AND CONDITIONS OF THIS SLA, THEN NVIDIA DOES NOT AGREE TO LICENSE THE LICENSED SOFTWARE TO YOU, AND YOU MAY NOT DOWNLOAD, INSTALL, COPY OR USE IT. + +1. LICENSE. +1.1. License Grant +Subject to the terms of the AGREEMENT, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly set forth in a Supplement), during the applicable license term unless earlier terminated as provided below, to have Authorized Users install and use the Software, including modifications (if expressly permitted in a Supplement), in accordance with the Documentation.
You are only licensed to activate and use Licensed Software for which you have a valid license, even if during the download or installation you are presented with other product options. No Orders are binding on NVIDIA until accepted by NVIDIA. Your Orders are subject to the AGREEMENT. + +SLA Supplements: Certain Licensed Software licensed under this SLA may be subject to additional terms and conditions that will be presented to you in a Supplement for acceptance prior to the delivery of such Licensed Software under this SLA and the applicable Supplement. Licensed Software will only be delivered to you upon your acceptance of all applicable terms. + +1.2. Limited Purpose Licenses +If your license is provided for one of the purposes indicated below, then notwithstanding contrary terms in License Grant or in a Supplement, such licenses are for internal use and do not include any right or license to sub-license and distribute the Licensed Software or its output in any way in any public release, however limited, and/or in any manner that provides third parties with use of or access to the Licensed Software or its functionality or output, including (but not limited to) external alpha or beta testing or development phases. Further: +Evaluation License. You may use evaluation licenses solely for your internal evaluation of the Licensed Software for broader adoption within your Enterprise or in connection with a NVIDIA product purchase decision, and such licenses have an expiration date as indicated by NVIDIA in its sole discretion (or ninety days from the date of download if no other duration is indicated). +Educational/Academic License. You may use educational/academic licenses solely for educational purposes and all users must be enrolled or employed by an academic institution. If you do not meet NVIDIA’s academic program requirements for educational institutions, you have no rights under this license. +Test/Development License. You may use test/development licenses solely for your internal development, testing and/or debugging of your software applications or for interoperability testing with the Licensed Software, and such licenses have an expiration date as indicated by NVIDIA in its sole discretion (or one year from the date of download if no other duration is indicated). NVIDIA Confidential Information under the AGREEMENT includes output from Licensed Software developer tools identified as “Pro” versions, where the output reveals functionality or performance data pertinent to NVIDIA hardware or software products. +1.3. Pre-Release Licenses +With respect to alpha, beta, preview, and other pre-release Software and Documentation (“Pre-Release Licensed Software”) delivered to you under the AGREEMENT you acknowledge and agree that such Pre-Release Licensed Software (i) may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercially provided NVIDIA software and documentation, and (ii) use of such Pre-Release Licensed Software may result in unexpected results, loss of data, project delays or other unpredictable damage or loss. THEREFORE, PRE-RELEASE LICENSED SOFTWARE IS NOT INTENDED FOR USE, AND SHOULD NOT BE USED, IN PRODUCTION OR BUSINESS-CRITICAL SYSTEMS.
NVIDIA has no obligation to make available a commercial version of any Pre-Release Licensed Software and NVIDIA has the right to abandon development of Pre-Release Licensed Software at any time without liability. + +1.4. Enterprise and Contractor Usage +You may allow your Enterprise employees and Contractors to access and use the Licensed Software pursuant to the terms of the AGREEMENT solely to perform work on your behalf, provided further that with respect to Contractors: (i) you obtain a written agreement from each Contractor which contains terms and obligations with respect to access to and use of Licensed Software no less protective of NVIDIA than those set forth in the AGREEMENT, and (ii) such Contractor’s access and use expressly excludes any sublicensing or distribution rights for the Licensed Software. You are responsible for the compliance with the terms and conditions of the AGREEMENT by your Enterprise and Contractors. Any act or omission that, if committed by you, would constitute a breach of the AGREEMENT shall be deemed to constitute a breach of the AGREEMENT if committed by your Enterprise or Contractors. + +1.5. Services +Except as expressly indicated in an Order, NVIDIA is under no obligation to provide support for the Licensed Software or to provide any patches, maintenance, updates or upgrades under the AGREEMENT. Unless patches, maintenance, updates or upgrades are provided with their separate governing terms and conditions, they constitute Licensed Software licensed to you under the AGREEMENT. + +2. LIMITATIONS. +2.1. License Restrictions +Except as expressly authorized in the AGREEMENT, you agree that you will not (nor authorize third parties to): (i) copy and use Software that was licensed to you for use in one or more NVIDIA hardware products in other unlicensed products (provided that copies solely for backup purposes are allowed); (ii) reverse engineer, decompile, disassemble (except to the extent applicable laws specifically require that such activities be permitted) or attempt to derive the source code, underlying ideas, algorithm or structure of Software provided to you in object code form; (iii) sell, transfer, assign, distribute, rent, loan, lease, sublicense or otherwise make available the Licensed Software or its functionality to third parties (a) as an application services provider or service bureau, (b) by operating hosted/virtual system environments, (c) by hosting, time sharing or providing any other type of services, or (d) otherwise by means of the internet; (iv) modify, translate or otherwise create any derivative works of any Licensed Software; (v) remove, alter, cover or obscure any proprietary notice that appears on or with the Licensed Software or any copies thereof; (vi) use the Licensed Software, or allow its use, transfer, transmission or export in violation of any applicable export control laws, rules or regulations; (vii) distribute, permit access to, or sublicense the Licensed Software as a stand-alone product; (viii) bypass, disable, circumvent or remove any form of copy protection, encryption, security or digital rights management or authentication mechanism used by NVIDIA in connection with the Licensed Software, or use the Licensed Software together with any authorization code, serial number, or other copy protection device not supplied by NVIDIA directly or through an authorized reseller; (ix) use the Licensed Software for the purpose of developing competing products or technologies or assisting a third party in such activities; (x) 
use the Licensed Software with any system or application where the use or failure of such system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss including, without limitation, use in connection with any nuclear, avionics, navigation, military, medical, life support or other life critical application (“Critical Applications”), unless the parties have entered into a Critical Applications agreement; (xi) distribute any modification or derivative work you make to the Licensed Software under or by reference to the same name as used by NVIDIA; or (xii) use the Licensed Software in any manner that would cause the Licensed Software to become subject to an Open Source License. Nothing in the AGREEMENT shall be construed to give you a right to use, or otherwise obtain access to, any source code from which the Software or any portion thereof is compiled or interpreted. You acknowledge that NVIDIA does not design, test, manufacture or certify the Licensed Software for use in the context of a Critical Application and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such use. You agree to defend, indemnify and hold harmless NVIDIA and its Affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to you and your Enterprise, and their respective employees, contractors, agents, distributors, resellers, end users, officers and directors use of Licensed Software outside of the scope of the AGREEMENT or any other breach of the terms of the AGREEMENT. + +2.2. Third Party License Obligations +You acknowledge and agree that the Licensed Software may include or incorporate third party technology (collectively “Third Party Components”), which is provided for use in or with the Software and not otherwise used separately. If the Licensed Software includes or incorporates Third Party Components, then the third-party pass-through terms and conditions (“Third Party Terms”) for the particular Third Party Component will be bundled with the Software or otherwise made available online as indicated by NVIDIA and will be incorporated by reference into the AGREEMENT. In the event of any conflict between the terms in the AGREEMENT and the Third Party Terms, the Third Party Terms shall govern. Copyright to Third Party Components are held by the copyright holders indicated in the copyright notices indicated in the Third Party Terms. + +Audio/Video Encoders and Decoders: You acknowledge and agree that it is your sole responsibility to obtain any additional third party licenses required to make, have made, use, have used, sell, import, and offer for sale your products or services that include or incorporate any Third Party Components and content relating to audio and/or video encoders and decoders from, including but not limited to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., MPEG-LA, and Coding Technologies as NVIDIA does not grant to you under the AGREEMENT any necessary patent or other rights with respect to audio and/or video encoders and decoders. + +2.3. 
Limited Rights +Your rights in the Licensed Software are limited to those expressly granted under the AGREEMENT and no other licenses are granted whether by implication, estoppel or otherwise. NVIDIA reserves all rights, title and interest in and to the Licensed Software not expressly granted under the AGREEMENT. + +3. CONFIDENTIALITY +Neither party will use the other party’s Confidential Information, except as necessary for the performance of the AGREEMENT, nor will either party disclose such Confidential Information to any third party, except to personnel of NVIDIA and its Affiliates, you, your Enterprise, your Enterprise Contractors, and each party’s legal and financial advisors that have a need to know such Confidential Information for the performance of the AGREEMENT, provided that each such personnel, employee and Contractor is subject to a written agreement that includes confidentiality obligations consistent with those set forth herein. Each party will use all reasonable efforts to maintain the confidentiality of all of the other party’s Confidential Information in its possession or control, but in no event less than the efforts that it ordinarily uses with respect to its own Confidential Information of similar nature and importance. The foregoing obligations will not restrict either party from disclosing the other party’s Confidential Information or the terms and conditions of the AGREEMENT as required under applicable securities regulations or pursuant to the order or requirement of a court, administrative agency, or other governmental body, provided that the party required to make such disclosure (i) gives reasonable notice to the other party to enable it to contest such order or requirement prior to its disclosure (whether through protective orders or otherwise), (ii) uses reasonable effort to obtain confidential treatment or similar protection to the fullest extent possible to avoid such public disclosure, and (iii) discloses only the minimum amount of information necessary to comply with such requirements. + +4. OWNERSHIP +You are not obligated to disclose to NVIDIA any modifications that you, your Enterprise or your Contractors make to the Licensed Software as permitted under the AGREEMENT. As between the parties, all modifications are owned by NVIDIA and licensed to you under the AGREEMENT unless otherwise expressly provided in a Supplement. The Licensed Software and all modifications owned by NVIDIA, and the respective Intellectual Property Rights therein, are and will remain the sole and exclusive property of NVIDIA or its licensors, whether the Licensed Software is separate from or combined with any other products or materials. You shall not engage in any act or omission that would impair NVIDIA’s and/or its licensors’ Intellectual Property Rights in the Licensed Software or any other materials, information, processes or subject matter proprietary to NVIDIA. NVIDIA’s licensors are intended third party beneficiaries with the right to enforce provisions of the AGREEMENT with respect to their Confidential Information and/or Intellectual Property Rights. + +5. FEEDBACK +You have no obligation to provide Feedback to NVIDIA. However, NVIDIA and/or its Affiliates may use and include any Feedback that you provide to improve the Licensed Software or other NVIDIA products, technologies or materials. 
Accordingly, if you provide Feedback, you agree that NVIDIA and/or its Affiliates, at their option, may, and may permit their licensees, to make, have made, use, have used, reproduce, license, distribute and otherwise commercialize the Feedback in the Licensed Software or in other NVIDIA products, technologies or materials without the payment of any royalties or fees to you. All Feedback becomes the sole property of NVIDIA and may be used in any manner NVIDIA sees fit, and you hereby assign to NVIDIA all of your right, title and interest in and to any Feedback. NVIDIA has no obligation to respond to Feedback or to incorporate Feedback into the Licensed Software. + +6. NO WARRANTIES +THE LICENSED SOFTWARE AND ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES ARE PROVIDED BY NVIDIA “AS IS” AND “WITH ALL FAULTS,” AND NVIDIA EXPRESSLY DISCLAIMS ALL OTHER WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF OPERABILITY, CONDITION, VALUE, ACCURACY OF DATA, OR QUALITY, AS WELL AS ANY WARRANTIES OF MERCHANTABILITY, SYSTEM INTEGRATION, WORKMANSHIP, SUITABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE BY NVIDIA ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE. NVIDIA DOES NOT WARRANT THAT THE LICENSED SOFTWARE OR ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES PROVIDED BY NVIDIA UNDER THE AGREEMENT WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. YOU ACKNOWLEDGE THAT NVIDIA’S OBLIGATIONS UNDER THE AGREEMENT ARE FOR THE BENEFIT OF YOU ONLY. Nothing in this warranty section affects any statutory rights of consumers or other recipients to the extent that they cannot be waived or limited by contract under applicable law. + +7. LIMITATION OF LIABILITY +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA OR ITS LICENSORS SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THE AGREEMENT OR THE USE OR PERFORMANCE OF THE LICENSED SOFTWARE AND ANY OTHER CONFIDENTIAL INFORMATION AND/OR SERVICES PROVIDED BY NVIDIA UNDER THE AGREEMENT, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA’S TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THE AGREEMENT EXCEED THE NET AMOUNTS RECEIVED BY NVIDIA FOR YOUR USE OF THE PARTICULAR LICENSED SOFTWARE DURING THE TWELVE (12) MONTHS BEFORE THE LIABILITY AROSE (or up to US$10.00 if you acquired the Licensed Software for no charge). THE NATURE OF THE LIABILITY, THE NUMBER OF CLAIMS OR SUITS OR THE NUMBER OF PARTIES WITHIN YOUR ENTERPRISE THAT ACCEPTED THE TERMS OF THE AGREEMENT SHALL NOT ENLARGE OR EXTEND THIS LIMIT. THE FOREGOING LIMITATIONS SHALL APPLY REGARDLESS OF WHETHER NVIDIA OR ITS LICENSORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND REGARDLESS OF WHETHER ANY REMEDY FAILS ITS ESSENTIAL PURPOSE. 
The disclaimers, exclusions and limitations of liability set forth in the AGREEMENT form an essential basis of the bargain between the parties, and, absent any such disclaimers, exclusions or limitations of liability, the provisions of the AGREEMENT, including, without limitation, the economic terms, would be substantially different. + +8. TERM AND TERMINATION. +8.1. AGREEMENT, Licenses and Services +This SLA shall become effective upon the Effective Date, each Supplement upon their acceptance, and both this SLA and Supplements shall continue in effect until your last access or use of the Licensed Software and/or services hereunder, unless earlier terminated as provided in this “Term and Termination” section. Each Licensed Software license ends at the earlier of (a) the expiration of the applicable license term, or (b) termination of such license or the AGREEMENT. Each service ends at the earlier of (x) the expiration of the applicable service term, (y) termination of such service or the AGREEMENT, or (z) expiration or termination of the associated license and no credit or refund will be provided upon the expiration or termination of the associated license for any service fees paid. + +8.2. Termination and Effect of Expiration or Termination +NVIDIA may terminate the AGREEMENT in whole or in part: (i) if you breach any term of the AGREEMENT and fail to cure such breach within thirty (30) days following notice thereof from NVIDIA (or immediately if you violate NVIDIA’s Intellectual Property Rights); (ii) if you become the subject of a voluntary or involuntary petition in bankruptcy or any proceeding relating to insolvency, receivership, liquidation or composition for the benefit of creditors, if that petition or proceeding is not dismissed with prejudice within sixty (60) days after filing, or if you cease to do business; or (iii) if you commence or participate in any legal proceeding against NVIDIA, with respect to the Licensed Software that is the subject of the proceeding during the pendency of such legal proceeding. If you or your authorized NVIDIA reseller fail to pay license fees or service fees when due then NVIDIA may, in its sole discretion, suspend or terminate your license grants, services and any other rights provided under the AGREEMENT for the affected Licensed Software, in addition to any other remedies NVIDIA may have at law or equity. Upon any expiration or termination of the AGREEMENT, a license or a service provided hereunder, (a) any amounts owed to NVIDIA become immediately due and payable, (b) you must promptly discontinue use of the affected Licensed Software and/or service, and (c) you must promptly destroy or return to NVIDIA all copies of the affected Licensed Software and all portions thereof in your possession or control, and each party will promptly destroy or return to the other all of the other party’s Confidential Information within its possession or control. Upon written request, you will certify in writing that you have complied with your obligations under this section. Upon expiration or termination of the AGREEMENT all provisions survive except for the license grant provisions. + +9. CONSENT TO COLLECTION AND USE OF INFORMATION. +You hereby agree and acknowledge that the Software may access, collect non-personally identifiable information about your Enterprise computer systems in order to properly optimize such systems for use with the Software. 
To the extent that you use the Software, you hereby consent to all of the foregoing, and represent and warrant that you have the right to grant such consent. In addition, you agree that you are solely responsible for maintaining appropriate data backups and system restore points for your Enterprise systems, and that NVIDIA will have no responsibility for any damage or loss to such systems (including loss of data or access) arising from or relating to (a) any changes to the configuration, application settings, environment variables, registry, drivers, BIOS, or other attributes of the systems (or any part of such systems) initiated through the Software; or (b) installation of any Software or third party software patches initiated through the Software. In certain systems you may change your system update preferences by unchecking "Automatically check for updates" in the "Preferences" tab of the control panel for the Software. + +In connection with the receipt of the Licensed Software or services you may receive access to links to third party websites and services and the availability of those links does not imply any endorsement by NVIDIA. NVIDIA encourages you to review the privacy statements on those sites and services that you choose to visit so that you can understand how they may collect, use and share personal information of individuals. NVIDIA is not responsible or liable for: (i) the availability or accuracy of such links; or (ii) the products, services or information available on or through such links; or (iii) the privacy statements or practices of sites and services controlled by other companies or organizations. + +To the extent that you or members of your Enterprise provide to NVIDIA during registration or otherwise personal information, you acknowledge that such information will be collected, used and disclosed by NVIDIA in accordance with NVIDIA's privacy policy, available at URL http://www.nvidia.com/object/privacy_policy.html. + +10. GENERAL. +This SLA, any Supplements incorporated hereto, and Orders constitute the entire agreement of the parties with respect to the subject matter hereto and supersede all prior negotiations, conversations, or discussions between the parties relating to the subject matter hereto, oral or written, and all past dealings or industry custom. Any additional and/or conflicting terms and conditions on purchase order(s) or any other documents issued by you are null, void, and invalid. Any amendment or waiver under the AGREEMENT must be in writing and signed by representatives of both parties. + +The AGREEMENT and the rights and obligations thereunder may not be assigned by you, in whole or in part, including by merger, consolidation, dissolution, operation of law, or any other manner, without written consent of NVIDIA, and any purported assignment in violation of this provision shall be void and of no effect. NVIDIA may assign, delegate or transfer the AGREEMENT and its rights and obligations hereunder, and if to a non-Affiliate you will be notified. + +Each party acknowledges and agrees that the other is an independent contractor in the performance of the AGREEMENT, and each party is solely responsible for all of its employees, agents, contractors, and labor costs and expenses arising in connection therewith. The parties are not partners, joint ventures or otherwise affiliated, and neither has any authority to make any statements, representations or commitments of any kind to bind the other party without prior written consent. 
+ +Neither party will be responsible for any failure or delay in its performance under the AGREEMENT (except for any payment obligations) to the extent due to causes beyond its reasonable control for so long as such force majeure event continues in effect. + +The AGREEMENT will be governed by and construed under the laws of the State of Delaware and the United States without regard to the conflicts of law provisions thereof and without regard to the United Nations Convention on Contracts for the International Sale of Goods. The parties consent to the personal jurisdiction of the federal and state courts located in Santa Clara County, California. You acknowledge and agree that a breach of any of your promises or agreements contained in the AGREEMENT may result in irreparable and continuing injury to NVIDIA for which monetary damages may not be an adequate remedy and therefore NVIDIA is entitled to seek injunctive relief as well as such other and further relief as may be appropriate. If any court of competent jurisdiction determines that any provision of the AGREEMENT is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative. + +The Licensed Software has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions set forth in the AGREEMENT pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (c)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2701 San Tomas Expressway, Santa Clara, CA 95050. + +You acknowledge that the Licensed Software described under the AGREEMENT is subject to export control under the U.S. Export Administration Regulations (EAR) and economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC). Therefore, you may not export, reexport or transfer in-country the Licensed Software without first obtaining any license or other approval that may be required by BIS and/or OFAC. You are responsible for any violation of the U.S. or other applicable export control or economic sanctions laws, regulations and requirements related to the Licensed Software. By accepting this SLA, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the Licensed Software. + +Any notice delivered by NVIDIA to you under the AGREEMENT will be delivered via mail, email or fax. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2701 San Tomas Expressway, Santa Clara, California 95050, United States of America, Attention: Legal Department. + +11. GLOSSARY OF TERMS +Certain capitalized terms, if not otherwise defined elsewhere in this SLA, shall have the meanings set forth below: +“Affiliate” +“Affiliate” means any legal entity that Owns, is Owned by, or is commonly Owned with a party. “Own” means having more than 50% ownership or the right to direct the management of the entity. +“AGREEMENT” +“AGREEMENT” means this SLA and all associated Supplements entered by the parties referencing this SLA. 
+“Authorized Users” +“Authorized Users” means your Enterprise individual employees and any of your Enterprise’s Contractors, subject to the terms of the “Enterprise and Contractors Usage” section. +“Confidential Information” +“Confidential Information” means the Licensed Software (unless made publicly available by NVIDIA without confidentiality obligations), and any NVIDIA business, marketing, pricing, research and development, know-how, technical, scientific, financial status, proposed new products or other information disclosed by NVIDIA to you which, at the time of disclosure, is designated in writing as confidential or proprietary (or like written designation), or orally identified as confidential or proprietary or is otherwise reasonably identifiable by parties exercising reasonable business judgment, as confidential. Confidential Information does not and will not include information that: (i) is or becomes generally known to the public through no fault of or breach of the AGREEMENT by the receiving party; (ii) is rightfully known by the receiving party at the time of disclosure without an obligation of confidentiality; (iii) is independently developed by the receiving party without use of the disclosing party’s Confidential Information; or (iv) is rightfully obtained by the receiving party from a third party without restriction on use or disclosure. +“Contractor” +“Contractor” means an individual who works primarily for your Enterprise on a contractor basis from your secure network. +“Documentation” +“Documentation” means the NVIDIA documentation made available for use with the Software, including (without limitation) user manuals, datasheets, operations instructions, installation guides, release notes and other materials provided to you under the AGREEMENT. +“Enterprise” +“Enterprise” means you or any company or legal entity for which you accepted the terms of this SLA, and their subsidiaries of which your company or legal entity owns more than fifty percent (50%) of the issued and outstanding equity. +“Feedback” +“Feedback” means any and all suggestions, feature requests, comments or other feedback regarding the Licensed Software, including possible enhancements or modifications thereto. +“Intellectual Property Rights” +“Intellectual Property Rights” means all patent, copyright, trademark, trade secret, trade dress, trade names, utility models, mask work, moral rights, rights of attribution or integrity service marks, master recording and music publishing rights, performance rights, author’s rights, database rights, registered design rights and any applications for the protection or registration of these rights, or other intellectual or industrial property rights or proprietary rights, howsoever arising and in whatever media, whether now known or hereafter devised, whether or not registered, (including all claims and causes of action for infringement, misappropriation or violation and all rights in any registrations and renewals), worldwide and whether existing now or in the future. +“Licensed Software” +“Licensed Software” means Software, Documentation and all modifications owned by NVIDIA.
+“Open Source License” +“Open Source License” includes, without limitation, a software license that requires as a condition of use, modification, and/or distribution of such software that the Software be (i) disclosed or distributed in source code form; (ii) be licensed for the purpose of making derivative works; or (iii) be redistributable at no charge. +“Order” +“Order” means a purchase order issued by you, a signed purchase agreement with you, or other ordering document issued by you to NVIDIA or a NVIDIA authorized reseller (including any on-line acceptance process) that references and incorporates the AGREEMENT and is accepted by NVIDIA. +“Software” +“Software” means the NVIDIA software programs licensed to you under the AGREEMENT including, without limitation, libraries, sample code, utility programs and programming code. +“Supplement” +“Supplement” means the additional terms and conditions beyond those stated in this SLA that apply to certain Licensed Software licensed hereunder. +12. TensorRT SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT +The terms set forth in this TensorRT Supplement (“Supplement”) govern your use of the NVIDIA GPU inference engine (the “TensorRT Licensed Software”) under the terms of your software license agreement (“SLA”) as modified by this Supplement. This Supplement is an exhibit to the SLA and is hereby incorporated as an integral part thereto. Capitalized terms used but not defined herein shall have the meaning assigned to them in the SLA. In the event of conflict between the terms in this Supplement and the terms in the SLA, this Supplement shall control. + +12.1. TensorRT DISTRIBUTION +Subject to the terms of the SLA and this Supplement, NVIDIA hereby grants you a non-exclusive, nontransferable license during the applicable license term unless earlier terminated pursuant to the SLA, to distribute the libnvinfer, libnvinfer_plugin, and libnvparsers libraries when delivered to you as part of the TensorRT Licensed Software in source code form or binary form (but not when provided to you as part of a hardware product), subject to the following: such distribution is solely in binary form to your licensees (“Customers”) only as a component of your own software products having additional material functionality beyond the TensorRT Licensed Software (each, a “Licensee Application”). Subject to the terms and conditions of the SLA and this Supplement, you may further authorize Customers to redistribute the libnvinfer, libnvinfer_plugin, and libnvparsers libraries as incorporated into a Licensee Application, solely in binary form, provided, however, that you shall require in your agreements with your Customers that their distributions be on terms at least as restrictive as those applicable for your use of such TensorRT Licensed Software within a Licensee Application. The expiration or termination of your licenses to the above described TensorRT Licensed Software under the SLA and this Supplement will not affect rights previously granted by you to recipients that were in compliance with the SLA and this Supplement.
+ +In addition to the rights above, for parties that are developing software intended solely for use on Jetson development kits or Jetson modules and running Linux for Tegra software the following shall apply: TensorRT Licensed Software licensed hereunder may be distributed in its entirety, as provided by NVIDIA and without separation of its components, for you and/or your licensees to create software development kits for use only on the Jetson platform and running Linux for Tegra software. You shall require in your agreements with your licensees that their distributions be on terms at least as restrictive as those applicable for your distribution of TensorRT Licensed Software as described in this Section 1. + +12.2. LICENSE DURATION +Each TensorRT Licensed Software is licensed to you for an initial duration of one year starting from the date of delivery or download. The licenses granted will automatically renew for successive one year periods, provided that NVIDIA reserves the right to terminate licenses upon ninety (90) days written notice to you prior to the commencement of a renewal year in addition to the termination rights set forth in the SLA. + +12.3. EXPIRATION OR TERMINATION OF THIS SUPPLEMENT +Your failure to comply with the terms of this Supplement is grounds for termination for breach by NVIDIA under the SLA. This Supplement will automatically expire or terminate upon the expiration or termination of your rights to TensorRT Licensed Software under the SLA or this Supplement. + +Notices +Notice +This document is provided for information purposes only and shall not be regarded as a warranty of a certain functionality, condition, or quality of a product. NVIDIA Corporation (“NVIDIA”) makes no representations or warranties, expressed or implied, as to the accuracy or completeness of the information contained in this document and assumes no responsibility for any errors contained herein. NVIDIA shall have no liability for the consequences or use of such information or for any infringement of patents or other rights of third parties that may result from its use. This document is not a commitment to develop, release, or deliver any Material (defined below), code, or functionality. + +NVIDIA reserves the right to make corrections, modifications, enhancements, improvements, and any other changes to this document, at any time without notice. + +Customer should obtain the latest relevant information before placing orders and should verify that such information is current and complete. + +NVIDIA products are sold subject to the NVIDIA standard terms and conditions of sale supplied at the time of order acknowledgement, unless otherwise agreed in an individual sales agreement signed by authorized representatives of NVIDIA and customer (“Terms of Sale”).
NVIDIA hereby expressly objects to applying any customer general terms and conditions with regards to the purchase of the NVIDIA product referenced in this document. No contractual obligations are formed either directly or indirectly by this document. + +NVIDIA products are not designed, authorized, or warranted to be suitable for use in medical, military, aircraft, space, or life support equipment, nor in applications where failure or malfunction of the NVIDIA product can reasonably be expected to result in personal injury, death, or property or environmental damage. NVIDIA accepts no liability for inclusion and/or use of NVIDIA products in such equipment or applications and therefore such inclusion and/or use is at customer’s own risk. + +NVIDIA makes no representation or warranty that products based on this document will be suitable for any specified use. Testing of all parameters of each product is not necessarily performed by NVIDIA. It is customer’s sole responsibility to evaluate and determine the applicability of any information contained in this document, ensure the product is suitable and fit for the application planned by customer, and perform the necessary testing for the application in order to avoid a default of the application or the product. Weaknesses in customer’s product designs may affect the quality and reliability of the NVIDIA product and may result in additional or different conditions and/or requirements beyond those contained in this document. NVIDIA accepts no liability related to any default, damage, costs, or problem which may be based on or attributable to: (i) the use of the NVIDIA product in any manner that is contrary to this document or (ii) customer product designs. + +No license, either expressed or implied, is granted under any NVIDIA patent right, copyright, or other NVIDIA intellectual property right under this document. Information published by NVIDIA regarding third-party products or services does not constitute a license from NVIDIA to use such products or services or a warranty or endorsement thereof. Use of such information may require a license from a third party under the patents or other intellectual property rights of the third party, or a license from NVIDIA under the patents or other intellectual property rights of NVIDIA. + +Reproduction of information in this document is permissible only if approved in advance by NVIDIA in writing, reproduced without alteration and in full compliance with all applicable export laws and regulations, and accompanied by all associated conditions, limitations, and notices. + +THIS DOCUMENT AND ALL NVIDIA DESIGN SPECIFICATIONS, REFERENCE BOARDS, FILES, DRAWINGS, DIAGNOSTICS, LISTS, AND OTHER DOCUMENTS (TOGETHER AND SEPARATELY, “MATERIALS”) ARE BEING PROVIDED “AS IS.” NVIDIA MAKES NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. TO THE EXTENT NOT PROHIBITED BY LAW, IN NO EVENT WILL NVIDIA BE LIABLE FOR ANY DAMAGES, INCLUDING WITHOUT LIMITATION ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF ANY USE OF THIS DOCUMENT, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
Notwithstanding any damages that customer might incur for any reason whatsoever, NVIDIA’s aggregate and cumulative liability towards customer for the products described herein shall be limited in accordance with the Terms of Sale for the product. + +VESA DisplayPort +DisplayPort and DisplayPort Compliance Logo, DisplayPort Compliance Logo for Dual-mode Sources, and DisplayPort Compliance Logo for Active Cables are trademarks owned by the Video Electronics Standards Association in the United States and other countries. + +HDMI +HDMI, the HDMI logo, and High-Definition Multimedia Interface are trademarks or registered trademarks of HDMI Licensing LLC. + +ARM +ARM, AMBA and ARM Powered are registered trademarks of ARM Limited. Cortex, MPCore and Mali are trademarks of ARM Limited. All other brands or product names are the property of their respective holders. "ARM" is used to represent ARM Holdings plc; its operating company ARM Limited; and the regional subsidiaries ARM Inc.; ARM KK; ARM Korea Limited.; ARM Taiwan Limited; ARM France SAS; ARM Consulting (Shanghai) Co. Ltd.; ARM Germany GmbH; ARM Embedded Technologies Pvt. Ltd.; ARM Norway, AS and ARM Sweden AB. + +OpenCL +OpenCL is a trademark of Apple Inc. used under license to the Khronos Group Inc. + +Trademarks +NVIDIA, the NVIDIA logo, and cuBLAS, CUDA, CUDA Toolkit, cuDNN, DALI, DIGITS, DGX, DGX-1, DGX-2, DGX Station, DLProf, GPU, JetPack, Jetson, Kepler, Maxwell, NCCL, Nsight Compute, Nsight Systems, NVCaffe, NVIDIA Ampere GPU architecture, NVIDIA Deep Learning SDK, NVIDIA Developer Program, NVIDIA GPU Cloud, NVLink, NVSHMEM, PerfWorks, Pascal, SDK Manager, T4, Tegra, TensorRT, TensorRT Inference Server, Tesla, TF-TRT, Triton Inference Server, Turing, and Volta are trademarks and/or registered trademarks of NVIDIA Corporation in the United States and other countries. Other company and product names may be trademarks of the respective companies with which they are associated. + +Copyright +© 2021 NVIDIA Corporation. All rights reserved. diff --git a/python/packaging/libs_wheel/setup.cfg b/python/packaging/libs_wheel/setup.cfg new file mode 100644 index 00000000..32a8c1c0 --- /dev/null +++ b/python/packaging/libs_wheel/setup.cfg @@ -0,0 +1,5 @@ +[metadata] +license_files = LICENSE.txt + +[bdist_wheel] +universal = 1 diff --git a/python/packaging/setup.py b/python/packaging/libs_wheel/setup.py similarity index 59% rename from python/packaging/setup.py rename to python/packaging/libs_wheel/setup.py index d73d9fd7..eb45a258 100644 --- a/python/packaging/setup.py +++ b/python/packaging/libs_wheel/setup.py @@ -15,20 +15,11 @@ # limitations under the License. 
# -try: - from setuptools import setup -except ImportError: - from distutils.core import setup import os -tensorrt_module = "##TENSORRT_MODULE##" +from setuptools import setup -def is_standalone(): - return os.environ.get("STANDALONE") == "1" - - -def is_dla(): - return os.environ.get("ENABLE_DLA") == "1" +module_name = "##TENSORRT_MODULE##_libs" def get_requirements(): @@ -41,27 +32,20 @@ def get_vers(var): cuda_major, _ = get_vers("CUDA") return "-cu{cuda_major}".format(cuda_major=cuda_major) - if is_standalone(): - reqs = [ "nvidia-cuda-runtime" + get_version_range() ] - if tensorrt_module == "tensorrt": - reqs += [ - "nvidia-cudnn" + get_version_range(), - "nvidia-cublas" + get_version_range(), - ] - return reqs - return [] - + reqs = ["nvidia-cuda-runtime" + get_version_range()] + if "##TENSORRT_MODULE##" == "tensorrt": + reqs += [ + "nvidia-cudnn" + get_version_range(), + "nvidia-cublas" + get_version_range(), + ] + return reqs -name = tensorrt_module -# Only standalone wheels need to be disambiguated. Otherwise, the entire tar/deb/rpm is DLA/non-DLA. -if is_standalone() and is_dla(): - name += "-dla" setup( - name=name, + name=module_name, version="##TENSORRT_PYTHON_VERSION##", - description="A high performance deep learning inference library", - long_description="A high performance deep learning inference library", + description="TensorRT Libraries", + long_description="TensorRT Libraries", author="NVIDIA Corporation", license="Proprietary", classifiers=[ @@ -69,10 +53,9 @@ def get_vers(var): "Intended Audience :: Developers", "Programming Language :: Python :: 3", ], - packages=[tensorrt_module], + packages=[module_name], install_requires=get_requirements(), - extras_require={"numpy": "numpy"}, - package_data={tensorrt_module: ["*.so*", "*.pyd", "*.pdb"]}, + package_data={module_name: ["*.so*", "*.pyd", "*.pdb"]}, include_package_data=True, zip_safe=True, keywords="nvidia tensorrt deeplearning inference", diff --git a/python/packaging/libs_wheel/tensorrt_libs/__init__.py b/python/packaging/libs_wheel/tensorrt_libs/__init__.py new file mode 100644 index 00000000..83df0448 --- /dev/null +++ b/python/packaging/libs_wheel/tensorrt_libs/__init__.py @@ -0,0 +1,33 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import ctypes +import glob +import os + + +def try_load(library): + try: + ctypes.CDLL(library) + except OSError: + pass + + +# Try loading all packaged libraries. This is a nop if there are no libraries packaged. 
+CURDIR = os.path.realpath(os.path.dirname(__file__)) +for lib in glob.iglob(os.path.join(CURDIR, "*.so*")): + try_load(lib) diff --git a/python/requirements.txt b/python/requirements.txt index 3c9b272d..43663c1a 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -5,6 +5,6 @@ numpy==1.19.4; python_version < "3.8" and platform_system != "Windows" numpy==1.23.0; python_version >= "3.8" and python_version < "3.10" numpy==1.23.1; python_version >= "3.10" Pillow; python_version<"3.6" -##PYTHON_BUILDDIR##/tensorrt-py3.##PYTHON3_MINOR##/dist/tensorrt-##TENSORRT_PYTHON_VERSION##-cp3##PYTHON3_MINOR##-none-linux_##TARGET##.whl ; python_version=="3.##PYTHON3_MINOR##" +##PYTHON_BUILDDIR##/tensorrt_bindings-py3.##PYTHON3_MINOR##/dist/tensorrt-##TENSORRT_PYTHON_VERSION##-cp3##PYTHON3_MINOR##-none-linux_##TARGET##.whl ; python_version=="3.##PYTHON3_MINOR##" ##TENSORRT_ROOT##/python_builds/uff/uff-##UFF_VERSION##-py2.py3-none-any.whl ##TENSORRT_ROOT##/python_builds/graphsurgeon/graphsurgeon-##GRAPHSURGEON_VERSION##-py2.py3-none-any.whl diff --git a/python/src/infer/pyCore.cpp b/python/src/infer/pyCore.cpp index cca5504c..555668e3 100644 --- a/python/src/infer/pyCore.cpp +++ b/python/src/infer/pyCore.cpp @@ -966,15 +966,73 @@ void bindCore(py::module& m) .def("get_tensor_shape", &ICudaEngine::getTensorShape, "name"_a, ICudaEngineDoc::get_tensor_shape) .def("get_tensor_dtype", &ICudaEngine::getTensorDataType, "name"_a, ICudaEngineDoc::get_tensor_dtype) .def("get_tensor_location", &ICudaEngine::getTensorLocation, "name"_a, ICudaEngineDoc::get_tensor_location) - .def("get_tensor_bytes_per_component", &ICudaEngine::getTensorBytesPerComponent, "name"_a, - ICudaEngineDoc::get_tensor_bytes_per_component) - .def("get_tensor_components_per_element", &ICudaEngine::getTensorComponentsPerElement, "name"_a, - ICudaEngineDoc::get_tensor_components_per_element) - .def("get_tensor_format", &ICudaEngine::getTensorFormat, "name"_a, ICudaEngineDoc::get_tensor_format) - .def("get_tensor_format_desc", &ICudaEngine::getTensorFormatDesc, "name"_a, - ICudaEngineDoc::get_tensor_format_desc) - .def("get_tensor_vectorized_dim", &ICudaEngine::getTensorVectorizedDim, "name"_a, - ICudaEngineDoc::get_tensor_vectorized_dim) + + .def( + "get_tensor_bytes_per_component", + [](ICudaEngine& self, std::string const& name) -> int32_t { + return self.getTensorBytesPerComponent(name.c_str()); + }, + "name"_a, ICudaEngineDoc::get_tensor_bytes_per_component) + .def( + "get_tensor_bytes_per_component", + [](ICudaEngine& self, std::string const& name, int32_t profileIndex) -> int32_t { + return self.getTensorBytesPerComponent(name.c_str(), profileIndex); + }, + "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_bytes_per_component) + + .def( + "get_tensor_components_per_element", + [](ICudaEngine& self, std::string const& name) -> int32_t { + return self.getTensorComponentsPerElement(name.c_str()); + }, + "name"_a, ICudaEngineDoc::get_tensor_components_per_element) + .def( + "get_tensor_components_per_element", + [](ICudaEngine& self, std::string const& name, int32_t profileIndex) -> int32_t { + return self.getTensorComponentsPerElement(name.c_str(), profileIndex); + }, + "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_components_per_element) + + .def( + "get_tensor_format", + [](ICudaEngine& self, std::string const& name) -> TensorFormat { + return self.getTensorFormat(name.c_str()); + }, + "name"_a, ICudaEngineDoc::get_tensor_format) + + .def( + "get_tensor_format", + [](ICudaEngine& self, std::string const& name, 
int32_t profileIndex) -> TensorFormat { + return self.getTensorFormat(name.c_str(), profileIndex); + }, + "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_format) + + .def( + "get_tensor_format_desc", + [](ICudaEngine& self, std::string const& name) -> const char* { + return self.getTensorFormatDesc(name.c_str()); + }, + "name"_a, ICudaEngineDoc::get_tensor_format_desc) + .def( + "get_tensor_format_desc", + [](ICudaEngine& self, std::string const& name, int32_t profileIndex) -> const char* { + return self.getTensorFormatDesc(name.c_str(), profileIndex); + }, + "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_format_desc) + + .def( + "get_tensor_vectorized_dim", + [](ICudaEngine& self, std::string const& name) -> int32_t { + return self.getTensorVectorizedDim(name.c_str()); + }, + "name"_a, ICudaEngineDoc::get_tensor_vectorized_dim) + .def( + "get_tensor_vectorized_dim", + [](ICudaEngine& self, std::string const& name, int32_t profileIndex) -> int32_t { + return self.getTensorVectorizedDim(name.c_str(), profileIndex); + }, + "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_vectorized_dim) + .def("get_tensor_profile_shape", lambdas::get_tensor_profile_shape, "name"_a, "profile_index"_a, ICudaEngineDoc::get_tensor_profile_shape) // End of enqueueV3 related APIs. @@ -982,8 +1040,8 @@ void bindCore(py::module& m) py::cpp_function(&ICudaEngine::setErrorRecorder, py::keep_alive<1, 2>{})) .def_property_readonly("tactic_sources", &ICudaEngine::getTacticSources) .def_property_readonly("profiling_verbosity", &ICudaEngine::getProfilingVerbosity) - .def("create_engine_inspector", - &ICudaEngine::createEngineInspector, ICudaEngineDoc::create_engine_inspector, py::keep_alive<0, 1>{}) + .def("create_engine_inspector", &ICudaEngine::createEngineInspector, ICudaEngineDoc::create_engine_inspector, + py::keep_alive<0, 1>{}) .def_property_readonly("hardware_compatibility_level", &ICudaEngine::getHardwareCompatibilityLevel) .def_property_readonly("num_aux_streams", &ICudaEngine::getNbAuxStreams) .def("__del__", &utils::doNothingDel); diff --git a/python/src/infer/pyGraph.cpp b/python/src/infer/pyGraph.cpp index ed355fc5..ae66ed12 100644 --- a/python/src/infer/pyGraph.cpp +++ b/python/src/infer/pyGraph.cpp @@ -867,141 +867,113 @@ namespace tensorrt .def("mark_output", &INetworkDefinition::markOutput, "tensor"_a, INetworkDefinitionDoc::mark_output) // Layers .def("add_input", &INetworkDefinition::addInput, "name"_a, "dtype"_a, "shape"_a, - INetworkDefinitionDoc::add_input, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_input, py::return_value_policy::reference_internal) .def("add_convolution", utils::deprecate(lambdas::add_convolution, "add_convolution_nd"), "input"_a, "num_output_maps"_a, "kernel_shape"_a, "kernel"_a, "bias"_a=nullptr, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, INetworkDefinitionDoc::add_convolution, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_convolution_nd", lambdas::add_convolution_nd, "input"_a, "num_output_maps"_a, "kernel_shape"_a, "kernel"_a, "bias"_a=nullptr, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, - INetworkDefinitionDoc::add_convolution_nd, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_convolution_nd, py::return_value_policy::reference_internal) .def("add_fully_connected", utils::deprecate(lambdas::add_fully_connected, "add_matrix_multiply"), "input"_a, 
"num_outputs"_a, "kernel"_a, "bias"_a=nullptr, py::keep_alive<1, 4>{}, py::keep_alive<1, 5>{}, INetworkDefinitionDoc::add_fully_connected, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_activation", &INetworkDefinition::addActivation, "input"_a, "type"_a, - INetworkDefinitionDoc::add_activation, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_activation, py::return_value_policy::reference_internal) .def("add_pooling", utils::deprecateMember(&INetworkDefinition::addPooling, "add_pooling_nd"), "input"_a, "type"_a, "window_size"_a, - INetworkDefinitionDoc::add_pooling, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_pooling, py::return_value_policy::reference_internal) .def("add_pooling_nd", &INetworkDefinition::addPoolingNd, "input"_a, "type"_a, "window_size"_a, - INetworkDefinitionDoc::add_pooling_nd, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_pooling_nd, py::return_value_policy::reference_internal) .def("add_lrn", &INetworkDefinition::addLRN, "input"_a, "window"_a, "alpha"_a, "beta"_a, "k"_a, - INetworkDefinitionDoc::add_lrn, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_lrn, py::return_value_policy::reference_internal) .def("add_scale", lambdas::add_scale, "input"_a, "mode"_a, "shift"_a=nullptr, "scale"_a=nullptr, "power"_a=nullptr, py::keep_alive<1, 4>{}, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, INetworkDefinitionDoc::add_scale, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_scale_nd", lambdas::add_scale_nd, "input"_a, "mode"_a, "shift"_a=nullptr, "scale"_a=nullptr, "power"_a=nullptr, "channel_axis"_a, py::keep_alive<1, 4>{}, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, INetworkDefinitionDoc::add_scale_nd, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_softmax", &INetworkDefinition::addSoftMax, "input"_a, INetworkDefinitionDoc::add_softmax, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_concatenation", lambdas::add_concatenation, "inputs"_a, INetworkDefinitionDoc::add_concatenation, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_deconvolution", utils::deprecate(lambdas::add_deconvolution, "add_deconvolution_nd"), "input"_a, "num_output_maps"_a, "kernel_shape"_a, "kernel"_a, "bias"_a=nullptr, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, - INetworkDefinitionDoc::add_deconvolution, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_deconvolution, py::return_value_policy::reference_internal) .def("add_deconvolution_nd", lambdas::add_deconvolution_nd, "input"_a, "num_output_maps"_a, "kernel_shape"_a, "kernel"_a, "bias"_a=nullptr, py::keep_alive<1, 5>{}, py::keep_alive<1, 6>{}, - INetworkDefinitionDoc::add_deconvolution_nd, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_deconvolution_nd, py::return_value_policy::reference_internal) .def("add_elementwise", &INetworkDefinition::addElementWise, "input1"_a, "input2"_a, "op"_a, - INetworkDefinitionDoc::add_elementwise, - 
py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_elementwise, py::return_value_policy::reference_internal) .def("add_unary", &INetworkDefinition::addUnary, "input"_a, "op"_a, INetworkDefinitionDoc::add_unary, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_padding", utils::deprecateMember(&INetworkDefinition::addPadding, "add_padding_nd"), "input"_a, "pre_padding"_a, "post_padding"_a, - INetworkDefinitionDoc::add_padding, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_padding, py::return_value_policy::reference_internal) .def("add_padding_nd", &INetworkDefinition::addPaddingNd, "input"_a, "pre_padding"_a, "post_padding"_a, - INetworkDefinitionDoc::add_padding_nd, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_padding_nd, py::return_value_policy::reference_internal) .def("add_shuffle", &INetworkDefinition::addShuffle, "input"_a, INetworkDefinitionDoc::add_shuffle, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_slice", &INetworkDefinition::addSlice, "input"_a, "start"_a, "shape"_a, "stride"_a, - INetworkDefinitionDoc::add_slice, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_slice, py::return_value_policy::reference_internal) .def("add_reduce", &INetworkDefinition::addReduce, "input"_a, "op"_a, "axes"_a, "keep_dims"_a, - INetworkDefinitionDoc::add_reduce, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_reduce, py::return_value_policy::reference_internal) .def("add_topk", &INetworkDefinition::addTopK, "input"_a, "op"_a, "k"_a, "axes"_a, - INetworkDefinitionDoc::add_topk, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_topk, py::return_value_policy::reference_internal) .def("add_gather", &INetworkDefinition::addGather, "input"_a, "indices"_a, "axis"_a, - INetworkDefinitionDoc::add_gather, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_gather, py::return_value_policy::reference_internal) .def("add_scatter", &INetworkDefinition::addScatter, "data"_a, "indices"_a, "updates"_a, "mode"_a, - INetworkDefinitionDoc::add_scatter, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_scatter, py::return_value_policy::reference_internal) .def("add_gather_v2", &INetworkDefinition::addGatherV2, "input"_a, "indices"_a, "mode"_a, - INetworkDefinitionDoc::add_gather_v2, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_gather_v2, py::return_value_policy::reference_internal) .def("add_ragged_softmax", &INetworkDefinition::addRaggedSoftMax, "input"_a, "bounds"_a, - INetworkDefinitionDoc::add_ragged_softmax, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_ragged_softmax, py::return_value_policy::reference_internal) .def("add_matrix_multiply", static_cast(&INetworkDefinition::addMatrixMultiply), "input0"_a, "op0"_a, "input1"_a, "op1"_a, INetworkDefinitionDoc::add_matrix_multiply, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_constant", &INetworkDefinition::addConstant, 
"shape"_a, "weights"_a, py::keep_alive<1, 3>{}, INetworkDefinitionDoc::add_constant, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_rnn_v2", utils::deprecateMember(&INetworkDefinition::addRNNv2, "addLoop"), "input"_a, "layer_count"_a, - "hidden_size"_a, "max_seq_length"_a, "op"_a, - py::keep_alive<1, 0>{}, INetworkDefinitionDoc::add_rnn_v2) + "hidden_size"_a, "max_seq_length"_a, "op"_a, INetworkDefinitionDoc::add_rnn_v2) .def("add_identity", &INetworkDefinition::addIdentity, "input"_a, - INetworkDefinitionDoc::add_identity, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_identity, py::return_value_policy::reference_internal) .def("add_cast", &INetworkDefinition::addCast, "input"_a, "to_type"_a, - INetworkDefinitionDoc::add_cast, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_cast, py::return_value_policy::reference_internal) .def("add_plugin_v2", lambdas::add_plugin_v2, "inputs"_a, "plugin"_a, - INetworkDefinitionDoc::add_plugin_v2, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_plugin_v2, py::return_value_policy::reference_internal) .def("add_parametric_relu", &INetworkDefinition::addParametricReLU, "input"_a, - "slopes"_a, INetworkDefinitionDoc::add_parametric_relu, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + "slopes"_a, INetworkDefinitionDoc::add_parametric_relu, py::return_value_policy::reference_internal) .def("add_resize", &INetworkDefinition::addResize, "input"_a, INetworkDefinitionDoc::add_resize, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_loop", &INetworkDefinition::addLoop, INetworkDefinitionDoc::add_loop, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_shape", &INetworkDefinition::addShape, "input"_a, INetworkDefinitionDoc::add_shape, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_select", &INetworkDefinition::addSelect, "condition"_a, "then_input"_a, - "else_input"_a, INetworkDefinitionDoc::add_select, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + "else_input"_a, INetworkDefinitionDoc::add_select, py::return_value_policy::reference_internal) .def("add_assertion", &INetworkDefinition::addAssertion, "condition"_a, "message"_a, INetworkDefinitionDoc::add_assertion, INetworkDefinitionDoc::add_assertion, py::return_value_policy::reference_internal) .def("add_grid_sample", &INetworkDefinition::addGridSample, "input"_a, "grid"_a, INetworkDefinitionDoc::add_grid_sample, py::return_value_policy::reference_internal) .def("add_nms", &INetworkDefinition::addNMS, "boxes"_a, - "scores"_a, "max_output_boxes_per_class"_a, INetworkDefinitionDoc::add_nms, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + "scores"_a, "max_output_boxes_per_class"_a, INetworkDefinitionDoc::add_nms, py::return_value_policy::reference_internal) .def("add_fill", &INetworkDefinition::addFill, "shape"_a, "op"_a, INetworkDefinitionDoc::add_fill) .def("add_quantize", &INetworkDefinition::addQuantize, "input"_a, "scale"_a, - INetworkDefinitionDoc::add_quantize, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_quantize, 
py::return_value_policy::reference_internal) .def("add_dequantize", &INetworkDefinition::addDequantize, "input"_a, "scale"_a, - INetworkDefinitionDoc::add_dequantize, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_dequantize, py::return_value_policy::reference_internal) .def("add_if_conditional", &INetworkDefinition::addIfConditional, INetworkDefinitionDoc::add_if_conditional, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_einsum", lambdas::add_einsum, "inputs"_a, "equation"_a, INetworkDefinitionDoc::add_einsum, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_one_hot", &INetworkDefinition::addOneHot, "indices"_a, "values"_a, "depth"_a, "axis"_a, - INetworkDefinitionDoc::add_one_hot, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + INetworkDefinitionDoc::add_one_hot, py::return_value_policy::reference_internal) .def("add_non_zero", &INetworkDefinition::addNonZero, "input"_a, INetworkDefinitionDoc::add_non_zero, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_reverse_sequence", &INetworkDefinition::addReverseSequence, "input"_a, "sequence_lens"_a, INetworkDefinitionDoc::add_reverse_sequence, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("add_normalization", &INetworkDefinition::addNormalization, "input"_a, "scale"_a, "bias"_a, "axesMask"_a, INetworkDefinitionDoc::add_normalization, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("remove_tensor", &INetworkDefinition::removeTensor, "tensor"_a, INetworkDefinitionDoc::remove_tensor) .def("unmark_output", &INetworkDefinition::unmarkOutput, "tensor"_a, INetworkDefinitionDoc::unmark_output) .def("mark_output_for_shapes", &INetworkDefinition::markOutputForShapes, "tensor"_a, INetworkDefinitionDoc::mark_output_for_shapes) @@ -1009,15 +981,15 @@ namespace tensorrt .def("set_weights_name", &INetworkDefinition::setWeightsName, "weights"_a, "name"_a, INetworkDefinitionDoc::set_weights_name) // Getters .def("get_layer", &INetworkDefinition::getLayer, "index"_a, INetworkDefinitionDoc::get_layer, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("get_input", &INetworkDefinition::getInput, "index"_a, INetworkDefinitionDoc::get_input, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("get_output", &INetworkDefinition::getOutput, "index"_a, INetworkDefinitionDoc::get_output, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) // Note: the builder is the _parent_ of the INetworkDefinition, so a reference_internal policy (which would // keep the INetworkDefinition alive while the builder is referenced) is unnecessary here. 
.def_property_readonly("builder", &INetworkDefinition::getBuilder, INetworkDefinitionDoc::builder, - py::keep_alive<1, 0>{}, py::return_value_policy::reference) + py::return_value_policy::reference) #if ENABLE_INETWORK_SERIALIZE // Serialization @@ -1026,7 +998,7 @@ namespace tensorrt // Allow iteration over the layers of a network .def("__len__", &INetworkDefinition::getNbLayers) .def("__getitem__", lambdas::network_getitem, py::return_value_policy::reference_internal, - py::keep_alive<1, 0>{}, py::return_value_policy::reference_internal) + py::return_value_policy::reference_internal) .def("__del__", &utils::doNothingDel) ; diff --git a/python/src/parsers/pyOnnx.cpp b/python/src/parsers/pyOnnx.cpp index 57887e87..ec9bc0c8 100644 --- a/python/src/parsers/pyOnnx.cpp +++ b/python/src/parsers/pyOnnx.cpp @@ -17,7 +17,7 @@ // Implementation of PyBind11 Binding Code for OnnxParser #include "ForwardDeclarations.h" -#include "onnxOpenSource/NvOnnxParser.h" +#include "onnx/NvOnnxParser.h" #include "parsers/pyOnnxDoc.h" #include "utils.h" #include @@ -98,7 +98,7 @@ void bindOnnx(py::module& m) py::class_(m, "OnnxParser", OnnxParserDoc::descr, py::module_local()) .def(py::init(&nvonnxparser::createParser), "network"_a, "logger"_a, OnnxParserDoc::init, - py::keep_alive<1, 2>{}, py::keep_alive<1, 3>{}, py::keep_alive<2, 1>{}) + py::keep_alive<1, 3>{}, py::keep_alive<2, 1>{}) .def("parse", lambdas::parse, "model"_a, "path"_a = nullptr, OnnxParserDoc::parse, py::call_guard{}) .def("parse_with_weight_descriptors", lambdas::parse_with_weight_descriptors, "model"_a, @@ -119,7 +119,7 @@ void bindOnnx(py::module& m) .def("__del__", &utils::doNothingDel); py::enum_(m, "OnnxParserFlag", OnnxParserFlagDoc::descr, py::module_local()) - .value("VERSION_COMPATIBLE", OnnxParserFlag::kVERSION_COMPATIBLE, OnnxParserFlagDoc::VERSION_COMPATIBLE); + .value("NATIVE_INSTANCENORM", OnnxParserFlag::kNATIVE_INSTANCENORM, OnnxParserFlagDoc::NATIVE_INSTANCENORM); py::enum_(m, "ErrorCode", ErrorCodeDoc::descr, py::module_local()) .value("SUCCESS", ErrorCode::kSUCCESS) diff --git a/quickstart/IntroNotebooks/3. Using Tensorflow 2 through ONNX.ipynb b/quickstart/IntroNotebooks/3. Using Tensorflow 2 through ONNX.ipynb index 4bc794e6..aa8f6328 100644 --- a/quickstart/IntroNotebooks/3. Using Tensorflow 2 through ONNX.ipynb +++ b/quickstart/IntroNotebooks/3. Using Tensorflow 2 through ONNX.ipynb @@ -8,7 +8,7 @@ "\n", "The ONNX path to getting a TensorRT engine is a high-performance approach to TensorRT conversion that works with a variety of frameworks - including Tensorflow and Tensorflow 2.\n", "\n", - "TensorRT's ONNX parser is an all-or-nothing parser for ONNX models that ensures an optimal, single TensorRT engine and is great for exporting to the TensorRT API runtimes. ONNX models can be easily generated from Tensorflow models using the ONNX project's keras2onnx and tf2onnx tools.\n", + "TensorRT's ONNX parser is an all-or-nothing parser for ONNX models that ensures an optimal, single TensorRT engine and is great for exporting to the TensorRT API runtimes. 
ONNX models can be easily generated from Tensorflow models using the ONNX project's tf2onnx tool.\n", "\n", "In this notebook we will take a look at how ONNX models can be generated from a Keras/TF2 ResNet50 model, how we can convert those ONNX models to TensorRT engines using trtexec, and finally how we can use the native Python TensorRT runtime to feed a batch of data into the TRT engine at inference time.\n", "\n", @@ -196,7 +196,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAD8CAYAAAB3lxGOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9WYxtWZrfh/3WsMczxhx3zLmGrDG7qrrJbplkAyRNGjYEw4ApGbAeDFh+0YMBP5jQkwG9+MED/GSbhgDDpiHLgA3TosmmRLYotbrZVd1dVV2VWZmV053vjenEGfe4Jj+sfaOachVFil1gAp0LCOSNyIgz7LPXt77hP4gQAp+vz9fn68/ukv+qX8Dn6/P1+fpXuz4PAp+vz9ef8fV5EPh8fb7+jK/Pg8Dn6/P1Z3x9HgQ+X5+vP+Pr8yDw+fp8/Rlfv7QgIIT4a0KInwohPhZC/M1f1vN8vj5fn69/uSV+GTgBIYQCPgT+CvAU+APg3wwh/ORP/ck+X5+vz9e/1PplZQK/CnwcQvg0hNAD/3fgX/8lPdfn6/P1+fqXWPqX9Lh3gCd/4vunwK/9whehZdAKRmWJQGCsxXmPdQ6pFEIIgg+IAAiBDx7nLFIKhAQlJd57QACCEEAKifcBrRTee0IIeO9IkoQ0TbHW0vcdUim8d2RphkQSBHSmx4f4s+A94uZxA23XgxBkWYIUAu88goB3HqUUWmt6a3DOgRAIpSCAcx6tFEJACJ4sTTHGYKxFaY1Sir7vQQSyLKVrW7RW6ESSppKubzk8PEDLlIuLa0wHzkGaAMqQFwLnJW0NSTLC+x7T10gJwidIkRNEwPke7+N1lUqQpglCCJqmxXlPlmWMRmPq3Y6u7ZBS4kMABEKI4RMTKCWRQmKtRWuFsQYpBEWRIxAIqTC9wRgDAtJUgQg4Z/E+oJSOn5EL8b13PW74fJx38fMUkKYpQgicdTjrIATKskBKycHBQXxu03F1dYX3EucC1nmss4QQyPOc/YN9lstr2rZhOh2T5xnT6ZjO1iyuV9R1T1nkCCHpe0vwYE18vuADPgSElGRZRp6nOG+x1mKtwYeA9wEhBePxlKZpQARCCAQf4jVyDq00UsQ7SSrJ4eEBm/WaqqoBCYKfXV8hEAK8j1m6khIhBNYN1wWQUkCAsizoe0PXdvF3dUKRF3RtS6olSivatmd/f48nz86uQghH/3/777/2Nv+XXEKIfxv4twGSRPL2lw956+5rSCRt33N2vWCx3SLzjFxn7E9muLbH+kBrGozt2FVrxpOE/f05zjnW6x2CBCkypEjoq44izciyjLquqOsdt26f8s473+C9n7xL27ZcXl6ytzfj3u27CCPopWexvaYyNV98403q9Q7XGvrOYoLk6fklJIq7p/uYpqGrGgqdkkrFq6+8wmgy5ve++z0u1tdMDvdIixxjFM22ZX9vSpYqunrH7dunXFyec7VYsHd4iFSa88tLmn7Dt7/zOrazNE3D3XuHCLljPJX8jb/x3+PO6Rv81n/0Pf72//m36OuEu7cyiv1r5rccXZ+jxBd58NGGansG/gw6Q2JuM8lfxRc9Vb/gen1FMcoYTQoOjw+w3nF+eUmaZnzhS1/i3t37/Od//z/h4uKSICRKpSR5wWa75Vfe+TYEuHjxIm5K50lzjRMd1nbcv3uHu6d3cUby4JOn7KqK+f6E6X7C+eIBL86eE1zCbLoHTjIppuRJydnZOVVdcXh8iA2GXbPD+Rggx+WE2XhKrlNs0/Hnv/Md7pwec+v2EUfHB/znv/vbnF+84N33zrlcVBjnWW13bOuKk1un3L5zTFXP+erXvsB/86/+JfbmI14sHrD1T7m82mD6hO3G8v57D7m+rLg4W7M8X9JsHUVaoPOSq/WWv/7X/zL37p/w6OEHPH76gOvVgqazzA/22DYdt+/e5/LqmnysSXSCaQzbTU2iUn7zL/wmP/7hD1lfX/HX/tpfIUkS/v2/9bd55dUJzms8HkfAhYAQgiRJ0EqRKI0Wkq5paawlLUqc62mbHWVZ8tf+6l/h+3/0Qz756FMSXXByfIff+I2/wMcfvIdZn/Ptb3+Lf/Ld32cym/Hk2dmjn7cXf1lB4Blw7098f3f42c0KIfwt4G8B7B9Mw2/8+r+GtoHHD5/QNA3b7Tae1FKgEMzmE84ePccLwd7ePuUo5ZNPP8B7S9d17O3tEYJkvaoQMtycKt578jzHmJ48z9Fa8+DBQ6RQOOfI85x79+4RbMD0PctmS+d6qrphs92SCInSGlu1eBSj0QiZJYggcMaT6ZRUJ3hjaaqGqm7IsowvfenL1LajMQYhwDpDCAEBWBs3uDUOrRK0VGyrit4YkkQQfEOWFbx4vo0X7+4xtgt8+JMz/sP/63/M9//oQ3aVQjMiyyd0bUpf5ayrlm98/RbOaN770VNm49usqzVajtGpYtc3VHVF27ToRNA2grMXZ7zy2qscH8HZ+SWLqwVlMSZJUpx3SCVwePp6S5InHJ0ccnVxhfEOEQI+eDbVhje+fJe+azi/fEaWJOzNjijKFKUlOlFsthuauiXRKfloGk/b3hAKqOs6ZlJS0fcGh6XvepIsIUkTjDU0TcNkf8TJwQldb5ju7fOTDz6k+1HF4dE+u13FrvqEtq0RWqOSQF5q0hxsaPlv/Xf+Kl/8wn32D0co6chzwdnVFu8dk8keDz55wHvv/pTNyrO6ahHeM8pTbt++jUoKrjctT58+4/mzB7Tdhu22ouss870p9++9wicPH7HbVkzGU2yoCM6hlKJtW2ZHc9566y1+/MOf8M1f+TZf/fo3+Fv/x/8DxVjSGwcIrHdY73DBEwDvPbKIGY9znrpp6J0nzXP8kLHdvn0LKRXG9CitkFJgTE+aJYzGYx4/+4jnl+e0pufq0cNfuFl/WUHgD4C3hBCvETf/vwH8D37RL3vn+fCnHzHLR4QQUyhnLZPpBCsCiZYURUbAkqYFeZEjREBJjXGGtu0pipI8H7NePYgpqlIkiSZYj7V2SHtjWg5gjMUYy+HhIY8ePeHV+68wGiVcbK5BBpq65npxzaQYUagUayyti+mfEiBcQAVQQuCtw/
[Base64-encoded binary patch data (not human-readable) omitted.]
w0S1LlitW4zKzCkQgwcyyc80TbMkMidmHzBG4sWLukZbJ+tJLZPxx/xEEfdIbqI1BWZRAYq+XoJb+vOBarWlSYHsPVEpPEaAHNZgjMSbhxBkqEaWrIjF9TZ7T0hZXv8lAFVmGx5nE6MKQKKqHcTMTFx680xVGarWQR7x5wmVA+SAUm4Bqg5kpTBKCzquLFCFBLJY5zBG/As5LwpBZ2TmuKxGRfoldCHr5O+ljHg/tKZpJOOvbdbYqsBYy7rZkIO8RnVVs11vIScudnuUUlRlSXfuKeqGw0OHc5YUJ1KEqigJPnM+dbx8uaXvT5yOnbgSyVRlhVkYF94HycXM+UkvUBYFZVVSVBVKOVIMWFvSthZjEnrhHBqlqCrF0AXmyaMrz+XVBSlpEb59x/UXOQTeP5b5SqlXwOP48Rvg49/6cx8tH/t7rt/OHfjs4xf59uaWsgHvPXd3dzw8dE8QCmMMIUq+/M31LR8+3FHXjq1qaNuaafAcjmcUmtV2Q1Wu8HPk1A2EaWZKimqz56PnH/Hy5cdyKoa0aPtnwtDTH+7x0xllG0yYcWFGuWKRvkoIxDiNspJsanRhiZMk3ioCOczEYWDuexkYKkW1XqNSYhw7WenMnnkc2bYrwjwTk8SfT9NE0VpSihROCLMxZoL3GKVo6xpbVMRxJnu5KUmR5D2rVYu1hmU5JY7BolhItfKkYVH2ycz20TefpTdexDkqS6aBigUxytfl/cRSLMtMREu7JXODijAMIqpKEh1utGUez3Tn7in5SDwEkteQyITsaS9qZhUhKuqyRo+ePI8Y4zFq2ecnRYgDfh4XY5Z60iuEGBZpssKVBVXd4JzBoHAuYjRoY0nL12yMhKnmJW2AZZCMyigjDsmsWPrvmqZd0TQrmXPYErRkCUoRpRb69Za+77FW0oOqtUjMy7Ihlo66qZimAbJHq4Km3lBWFUZXaDWhlRNLeJrZbNZorRl6SRY2Wi/gm5kUI0VRUlc17WqND3Duhv8fc38Wa2ma7vlBv3f6pjXuMSIyIqfKqjqzj3vAlhhsgy9AlgAZIWRuAMMFluw7JKQWSCAsSyCmGyQuEFwgAZYlS2AhLjDiwjR22+52d5+xhqzMrMzImPa0pm98Jy6eb0eV8ck+h3P6oFpSKiN2rFg79t7rHZ7n+f9/fwyR9WrFclEyjnsWzYqcIqVbUthI09TcHl9SVTWLZsPu4S8ngejfAP7rwP94/v//5Zc+/i8ppf5V4B8H9v+gfgBIPTqMPdppFkvJZn88YapKE4LHOnkDn07tnMba0NSVjAy7I8djy2q1wehCPnbqCYcjh4cduIp/5JPf5ONPf42yWEBSci0OgTh19O0DXXuPzgGtvAh/op9VayJM92FknAYKlVFWiSkoTuTkSWki9D2+axn7lvZ4nBNtasYpEBNkYxm7VuSqZJTKdH3L6XiQN7DVFGVB4ax0n2PCaSMOQ+dIg8STh5iJkyf0PWGaZNSYAgY3C6vAKE1TNxLpnaIs/CybaI4B7SR+HcQzL5E9YvxJSs+GIWEipuhFL5BlcYYYUAYKV5J9QTSCfDdaU5cV76aJ/X6PUhpXlkxTIOSECgFbZDCwvloQbKJKGkxFOmVSrwipJ/hIe5hojxPWZPI0Se2emOXLsnkZKwvXOkvd1KQcMVmRrBUdiTZom/GTRI9lRCGplJbsAYXkVkotMluPRdWntaWpF2hrOeyPjN5T1ZIfqJVlmiJF0eBsRd97QvDUdUYrsTNvNmcsmppu6NjvQSnL5cUTmsWSlOH8rGCzPsPYgr59kLJMiQXZzCNNHgnWKVGVmnEYKcsFSjuaasE0RPpuJEWPNomqXsg0xUu/5+Lymtbf40NktVqx352+c/39WUeE/yekCXiplHoJ/A/mxf+vKaX+W8DPgf/K/PT/GzIe/BwZEf7zf9rra6N59uwpP//mJ6w3TynLknGMxPniNk0jSlse5/hPrp+y2iyoKs0wtvjgWSwgRei6gbLQTFOk60dOw8QHVy/45LPfYL15AtmiAW0SmR4/tYzDjmk8oLPCoCnmiYIyzC45CUuN2aMLQ0qeOA0olQRDPmaZeYdAPweNFK4gTp5x8hTVAnxk6gdWq6XcQEKadeFa9P5aUy8X5AzTOEkasDYi0BlGwugJs0Emh4hvW9HrWyumFOcwtsTHSB4nqXlRaG2lSa4UkVEWEBljpNQxOb33MeSsmHwg+IDSmRgnxu6E0xW5WJLnrn2KeSY6VWg9yOefJwXBB7nSzuiy4dSSFfhZ6pwdKJcoVprSlEwBbEjoEEhDT3c6EvwJayKF01glw1bBpYuGQ2lZ2CnLFMCW4rh7RHCnmKQEMm5ON1bvFzqzKMdYIw7sLO8yjCFrTVaayQeOp5bFakkMYggypqAqS1L2tKcDerVgtTpnHCQl+j7t8VOi7ycuNwWFK7HOSZDI2FMWNVXRgDKsVyVFIcAcpxPTKFkDdqYtT9M0U6nlBlU4xziNDEOPNklGjECKkWmcGKcDxiQOhyNhyoQJDseWru+YpsA4TlT1XzCVOOf8X/2OP/qn/4TnZuBf/LO87i/9JYzVuEKy3e4fHni4P3B58RTvw1zTRZSBq+srtptLYhoJMUpNXImgYvewn8dVcqVNGM4un/Ibv/1XePL8E4gGXTboGICJnERjH6YTYWqJE7iYqRZr8dUnT8qQkpfmoFOYwjCFkcl76sKglTTEHmfeYhopqKsKlTImZgqlGbqe5AOFLYghvnfQLZbSadYzCzCME3Ge1xuliT4yhBPGFihjST5Qu5I8SbS3cyV9P83kYnHB+flzmpgAhS4KFAZt+lkBKSXCLzP29ExwSoj3qagKlIb2dKRxC8pyKeNPHwhEmqpBKUvG4iMoH+YutqKqatQsTxYzz9yHUCK0aceWaBOurMhDQNlAyiMxjZyOD+TYsVnVpMnj1S8mGczTCWct2om6zoco9bwRO7HWWizQIIusKAnBz5J0wZprI6M3bbWExjxqhpVBGccUMvcPe4qqZr1aM3gxsfkgITfTGEgLJS7UAG07MQ6S8TgOgbfDa8GPb7Zst1umSaYrbS+EaVcIEakdRowS9F3X9cIw0IpxmohBbldVKbmTVVWTlaLtelIYKYslZelIKdOPwnZwzlEWBeOQ8GFisVhS18xS5F9xF2HKibc3b7l+co0rHW9u3kr9pg3KZkJKqJSoSsfHn3xIjgW3d0f82KN0KbBLP+EKS1UWPOKiVpstH334KZ/94DeoqiV+UpikyTFD9DNc02NMwNqEjhZjNM4ZtE7kaZCZs5WPZe2whaPzHcpmlE7EYWRsO0I/yNw8w6JpKIqK/tSikiL5wGG3lzFbjIT5RBV+XMIj2QfTOOIHYethrCy4mMjzZmCdoz91XFxfEcIkSTwaQhjJvcYkSNmSgGQiSUsdq4zF2Fkbn9NM/3lcWHoWQ2Wsc1hXklxBYwuU1XTdEdtsWRhDePyBZY3WVkRZs9bAaEfKCmMdRV2LXl5pVps14wzICElGapEAzoIFU1psZdBWNtvoJwqjKa0TQlKa1z4SN46WE/wxK2AaJ6Eru0L
gKMbNTUExEVV1RdsldEyoOaZN4IIaZTXGivEILbkKMUOYYbWPO2TXduyPB7qu5dmzpxRFTVXWaCXSXD8JlnwaPdvNGc6UAJzavcTcWcU4ikfkcDoRY2KxWos2IAriPZPmgFyHs2buBTixQsfMYrHEp4mub1Eqo7XYibv+SFkp6kVJJjKNgTzfno0tUEnWT12571x/vxKbgHDxKxbLZg7pSHz6yfdoW09RFHTdCZMUYDAGju0JbTLLqsI6xf5w4ng8cn52SVUXxACXF2esl+d87we/zubsEq0MZdVAhBTSe82/MVAWFtUUqKLBlA3NqkFZSwgDMWWcqygLQ0pO3HsKqqbAxMgwDUxdR/bS+Q4xYYqSjKI7dVS2oEsdh8OR8/Nz2SiQhlVZVOz2O4wxVHXN4XggTxGjDDpm8RKME0obfOhQWnM8tpxvN6K7N4E4WoauJakJVyVM0YAVyW+OiWn0cjVGUpDSewSY0IJyDKRxkDBUpTG2QJsCU8qinvwos23ZWrDGzNdnN7cMFdo4XKUovceV3Wy+kpl71dRwOmGc5Xg8onIiGyPaCi0jvyYuOJ1aDnuFNY6q0OSkMLp4L+QhIUk+RSENvbmhGWMgpCQTEuRGoLIiEUX9WJb0w0B6NIUpM1ugE2BQxoiAyjjKeoFxJZMXutXD/Y7FshYNxThyOByEiuw0fS/Aj9VKEHibzYrT6cTl5SXEwO6w5/7+Hm2uKcqKGCe0kY2ubfcslzVlpfGDlDBaG4mEQ1yVj47HaeYBWFsQUoufTkwTgs+bPNZlzs7XLFcVOwt95zk7X7JYLjl1DxwOBxSZYfhLiiH7h/UIITCMPYdDJCQJEu2HgdOp5+zsgovLS1LqaRaON29foShARawr0CYjm3mm7Y5SY9ua7faCFx9+zNWTZ7iqmUnFI0Y7yDJH9jEIxkkpClfQ9QFbKUzhZmhFhy5KlK2lY58Uh9t3OKcEwtFPcnqPE4uy4HQ6ECO4qpHGlLa4ouJuf8CHyHp7RlEU7A8CUIlZXGIoTdv2TGPEKjULdzzX19e0xzeM00SzWHCYmXW+a9HWMPYtWSty8EzRkzHUtiR7T7YBn6A9HvFhwl2eEWIgxQmXpfmao2A7U5TEWuuclAVzEGaIsNE1wXvCOFCsV0xegKnEyDgFhkk882IE0tRNg9ISGV4VgsYqZ6aDtZbd/R1+mlhvHCmNrBY1H7y4ZhwCu7sddbPAad5nLeZH8IGFPHnqqiLMzc6ubUFpCuvou0EckVpIx4+hqzFFrHWgEtoarJPNLaQonf65b+LKCltWGOtIGfw8DbJOc3a+wfUWYzXWWlbLJa4QX8b9/T0hTNzdycL92c9+xrKRxKD9w55hPPHk+plsVEVNjhMeD0pMZNlrXnz4EXd3N3TdkfXmnGkaJENw6ORmUBTsdnuU7ciMHI5HjJLcyM1mibGBV6+/4vXrt5xtr9Da8vXXN/gw0iyWBJ9YLpffuf5+JTaBnLNAPnYtzknwY1GWbLYVMWXqwnJqJ/ox0vUD45BxhSYmifUexp6qLpkGz83NDU294unTD9ienVHWi9nnnslIt1WXhniamKaRRwXcNAVs0VAvGrL3jNNARs0ClAmMot3tGLqeYrsgK6m76rJkeXHF4UGyEZrljO7qR8pmiUlKTo2ixBjLODcL6wb6bqDrB8qyfE+giTlj5pHY6XSi76Weq8uSh+BnutHpfcTYEIOAUaulZAY2K5arFTFH4dq1R/rTgeXCCSs0CZlGZSQLcQZrKBwpSE1cVDUhJJS2OGtJSc36WqErWSNJyD4ErCuorZLmoDEUdUOzWpPblpRlHGeNnQk/iW6m5RjTY21FcVGz2mxZrtdCONKaPI1oImjYbhsWywUvX32LK53cvOZGsUWBVnN894AuC6qimOX/UnLlnKVJqsRQlHOmKCsKowWMqswv/tMWZYtZKh6xrsZqgXluzpZcPTnndOroh46r6494eHjgeDwSwoS1Al7ZH/a8DnfUdcPu4cCZuuSblyNlWbPZnjH5ga7r+dnU8fyD53ifGMaCxXJJs6wIcWQKE7vDjrbtubp8wursjNOh5f7+DcYG/Nhx7I+sNw3ea7746me8ffuK7dkZ/dDyzcs31LVEmG+2Z0xTz49//JPvXH+/IptAoqpLxsnLrWAYqcpI06z50Y9+zEcff8BqtZpxyx3b9TXH0463b+/ROnFqTxRFyaJZyzWrXKCVoSqbOYxD3Hxa6Vl4okhR5rH1Yk30mhwVhV1RrFaEydMPI8VihXGWGDwqKXb3D+QcMHpNCB6jxXjSd3tev3zFkD3nz5+BdQyhw5U1w/7EGCLnV5cUTcN4PJKVRlvHMHmUMiLwsZK/mKKEnpRFwel0wvuJZdOI9LRtCclTWf2e4zcoAZp+sNpye/uA0pazi0umUYCqYZLI82k4o6jNTEcSWpBSWUaWWrwHYT7NjasFgaakWaiS6PxJ4vqzzhFTwMdAXRVUVcmwfyBrTdk0VAsJick541yBVoopT0zDSN/26KwZuoGiNKisKV3FZnPGxeU1Oif293cYldE5sT5b8vGnH3HzcE8CukHKRenlWKxxZDRxDHgg1RWFcXMfIaJmf4LSCuEiznTrqkKnhLJy81GmQBmHdg4TEtaKhFkbRVUXtN2Bu3vRraQI3//B98kklsuG5fISV1hOpyOT7/n25S3aXHBz+5rDYU/MRoJuTyfKsubsbIMPnpQCwzjRtgPWKUKcmKZOyoacKMqCfhzZPTy8pwk3TcnzFx/QdZ7D4cDv/8Hfw4eOuilYb9Z0rWe323F+cSWbXVXTj56H/e4719+vxCaglOLDD19wPD1wc/OW/X5P8HB+bqjrmsNhT1YFxmaKoqQsa97dvOPdzR1lKWq1srDU1ZLt9or16pynTz+kaZaQIXgJ7ZQTL0sDSSdUXWASxFhg3ZJ6swVXEIdWBDHOoXOW9KJxJHovxg5liQjK248Tu5sHdvcHzp5fU6+2dMETjaUuNIfxAVOULDZbsrEkZdCuJCQYfaRqljjrxA4bRbATgwelJE4c6ZkcjjvGoaPvO2onzMUYE9FU7A5Hzs4uyN7z5uU3PHvyAVkb/DAK09APJN+TioIQPTYFrHGza1ba7jGmOcseyOBMScyzUSjLKU6MGCW4bu+lAWWLAlM4fAasQ1uxKmM0BlHXKdLMEOjJMeCKBcx5B217oh966sWCyyfXWKXo+o7SGqqqIOSBs6sr1tsNU/CUucT7wNDLpmJNSVk4dJ676jGhq7nzjzSUT6eTNBRn5FgIER0TdbMgawtFIRuBNhgreoDlqsIVFmsVTW1597Dj1atXrFZbrHF88/WX3N/fi9uvdrS7A5MfMFbz4oNnnJ9fEXzi7ZtbnCtE2ahlZGmMo6pqpnHi9u6O42ng2bMrQhy5f7ijbaX3cHX1lHGM7I4HUXYaQ3toMbahcCXtqccYx3pzhQ8D7ann449/AGqBD4nLqzXWWs7PL/id3/lHvnP9/UpsAimn951QGTNpiqJgGkcWiwXDeOR4HChKhVIN45BYLc4AxTgKQHOxWHJx/oQXzz/l/P
wJZ9tLXFGTYyQFYbZhLBBRKqKdIkyJ9tQRRk9R1qiyIvW9aPPrRq6944RaLPD7A6UtUFoJoaeyTIcToRtwxnJ5dsnFtaDJumPPYrnG+oxyJauqxJQV3eQJgCsrhtETQmK5XOPHidOpYxgmlpdnhEnj/cQwTXjvGaaeruswRon0dhpompJh6NCFYex63nzzLeuzC3b7A3fvXrNebxm6jmk4olIk9CdcufwFLIRIjJ5HQFgKMudHKXyUn0fX9XR9QBcL0danPMuFBZElEWcanyJYg9FSk4unW6Pn7jwpMqeQUBiLm1WEU5g47B84tQeaZsn2Yiv8xixJvOfrFW/ePOBDYHO25ebuhrIq8MHT9wN931G4mkW5onIlp7GVkir9IlNAz+pGHx7dhcIa8D7hlAXtpBFqSrQR9WFVaaoClguHM4lTe6RpHE+fXZGzNCy/eflz6dhPI1134utvvmK9XrBer1jVH1JXC9arc75cfktRNMRkePLkKS9fvuKrr37MkyfXuEISlVxREMlszrZk7Rmmln7sGaeR1focZ2vGwXPz+p6bN98Qg6aoVhRFzaJes2hKqvpKSMuq4Mn1Cx52O4q5BA0+Udf1d66/X4lNYBxHfvz5TzieHtjvH/jww0+5fvaU3UNLiJ5Ipu9ODPcntosPKI1hvbpksVjRdgfquqYsKpp6y3Zzxfn2CUWxQCG3hJwjKQdU0OQcsAq5egXP6BOlW9AsRcp7f3ePKUrKWmrulDM2BGHFF6XYnic5SZTSs602sF5taRYrHrqePkSWVcUURoFtrlfYqhHxR9lQFyWnwxFb1KxXZxzTgXEYyEkSlhSZ9nRiGAb6thVFoRZ9+lSWkCN+GvHTiMNTWcvbV684O7/gfLNhaE+slgvC1JH8gDGK6AfIDdba2WMfmMYRZzTOSBxaIqO04NyCT0yDJ/qMq6x00Yki2/UenyK2sCgr8lZTVmQvzH4zC2UKY+bRnZd/v3PUZUkcPTGIZXjoTwz9ie3ZBlctmYYR5RT9NGLqkikn3j3cYQo3ZyhAWRY0dU0KHTpDYSyrZkEfR+lV+ImsZwCSgqauOfU9YRKOgZr9FNMQ0KXBFWK8ykrjioK6LkihpSgdOo/s9vcE7VksK+pqiVKGvpvYbLfsd3sWywpjwBUG6wQR9urbGzbrC5ypWC3O+Oqrb/CTYNCmITH0gWlKbC4uqZaGrjtwdrlhc3YGGk7tid3xQFaOGFv8lNC6pCyWvL69Zb1ecH15xRQG/OT57LNPMNZxe3ekKtc8uXqBjwfqeoGfeo7Hv6Bi8C/7kXPm/v6eaepm0UbmdDyRM1xdXfHzr3ccDgfGsWVVwuk4cti3aJMx1hCDxqM4Hkbub1sWdRQDjdbkGfGVcyTGCdLEkCJTP5ETVPWCwtZobdnv7tgfT6zWhil4TAatLd39AylEqqIUKkxpmeKJMA60pxPtseP88oqYNad+oFgI3dVPXsw8TYWpKpSXN3+9WRNiZOk9pqmpvKcsCvqum70KRjrgSuFjJMVEXVUMbZT8upxoT6f3yOymqjgcO25v3nF+/XRm9nmCH4hhFBNU9ECaT0dhAsToyQEKJzPxPPP5jS3ww4jSmrKyOOek0TaKki1GKQWMlSbcOAewhnHAuZKiLLCFoy4KCVWZMs7I6xTWMk3zNCNlgh9ouwM+XFA1DTFqqkVFP/UoK9TiV2/fYJWirGvag8SgV2VFmuR2EbynsAVlUcBsGMs6Sz6hkhBVa630PObZv1KPpiENyshEwAf6fqQsNSpH+r4lh56YPFMaMMngCsPQe4xVtO2Rl9/+nMPxjpvbt1gH49gxnloe7vY8NB1alXRt5t3bB/LbPZ98+ilPnz6Xen8QJqGrK7wPvHv3lpQmhrGfYaKecUwMXUBrx4urNc8/+JjSnbFcnvPRx59ineHd3WuOx4Ht+QLnFuRkubvf0w3vaI4tm82W9XbznevvV2ITKMuSy6tLprHj1B7Z7fbc3R74/me/IaqwyXN9dc39ww1KCUjhdDqyWtWCGE+a29sdRjdU1ZKqXKICJAJyHjDPXZN09aeBYRgoTYNzBXEKHLp2zpgbWaY0U3ZEl7B7eKCuKlxR4OoaKoufWm5vbjm9u6XRlsV2yzijslfbM5ntx0RVlKLFV8ImJEiJYFxJUTWA5CwWpcWae2JIaGeElluIH6CqxGU2DCMgHf6h7zg/27JoGnLbs16tePXyW9bbc8pmwdC19G0rNJ/sSNELaQeBZ8hD2AQplDIqzOIaEGFPwlmhEGENYRrxBOKcnMss2NGPISFkJu8pywLrLMZYqqoWpV0MwtBDYbVGFZZhGlFkQvR0/YlTeyBrAXPYwhJyIOSAKxwPDzsWTUVd1+x3O5wWA5BSmmEcMIcDdbWUZinMxig9p6hlhmEQEVOhmcIjwFNCXo2RXkBOMI3SbCuLzHKhORyOpNBiysjZ+ZbNdsvp1HNzc8fp2PHhRx+z2z1w8+4tw9Tx8Ucfslw33L56x3I5G9k83NzckSIsVitiBIX4D5QSwdU4jdze3/LN733JxeWasnR0fc9qsWa1ctSLhnEIfP7TL3jx5Am/8zu/S98Ffv7VS2ypcYVi8h7jOk7HAUXkYX/P6B/4/Kc/45NPv8dy+d2k0V+RTaBh7BqOx4mpLzAKqqqgfdhzf/NOhB25pq7O0A6KasTWiZwPTGmibx3Wrrm++h6XFx9R1VsxjZggoZjZo7PB5ozykLqB2B/JeU/UWkQ1w8ju5p4QAsVZQxU1qWs5TAGdMq5YYuoGVTYyWjtODIcTeVlh1xd0y5pu9NT1BUu1YeoGbHGGNxHrLBZLQmFLh46QsmZ1fo1PimBKumkiVgt6NNtmCXZPXWvubu/xMbOwFeMoYRN13XAcOrZmCVrT9h1Pnj7l62++5e3L1/zgh79Of+yJnZRAKWeMqlGpQbNAZS3GqOAJ/YG0sHJjKEQ8E1KiVY6irLHW4WMkDEeKsibHzNT24k5bL4hdj98fcFqzKgsRHoWAMwaMxlYVh/bEpBW5rpnajnEMqKqhnK/fQzdyOpwIU8I6ByFTu4bu2KOUYZoCTnv8EClsTX/qyFiUU8Q4cVQZrxLNwqGSJ8WJFBI+SmJS1ogxx9Uko/HKEExB4cQPYGwBWaOSBi/X9bSoiLZGlwUpHWnvDugxsT+0pH4iD4F+13O1/oCz80tCAqdqxqNC6ydkDK5cU9SAdpxfKU5tyxdf/QGr5ZqyrKnqkqfX55hCkcYlsX9Ke+w5vms5255RpjXrYgtkjqd7bu5vqBrLZvct5MQwvWE4ilR7c36GcQts43Glo7254dzV/N7nX5DajhcvXnzn+vuV2ATE799yf/dAYTWrRc16ueJ0OpBU4nDYceqOEiyxtiQ76+RzQKVAVS3Zrq9ZLtcCnIwRU1rRrs+wSZMNmjg76eL7TrdOif1hx/39Dh+1ZLdpiH5knAKHU0+1XFNVBQpRaqUoCUN1WeMKhVs0mLKkcZIRqJISA6I2knFfFTirGFMSr/0wMo0ToLHGMM7a8bJpMDpxOJ7mHsXw3tI6jmI9ljAUjfeJmBTaW
uH7KcN2Tl5q25aUpRYeJ/EodG1HsQlgZENU2ZP8RPATOXq5JCslgM4kslZBjonL7lFl18+5emTpvOcwcwJTQCGk4+gldBWYGftxTpa2uKIQrsEcExZjJA8jx/0BP3mWKzmxjJYMAO+9jHZnK3FZlExmoh9HYtYY55hiImRJQtY5kdXjOFA4CTlEYp6k+WdL0HbOGWBmKIp11xqJH5Owm4BPnmVdoXxJGHoe7o6Mk+fq+gnnZ+AjbLYbqrrCuRo7lyOH/ZIQJrZnZ9zf33J1fcGpPXF3f8uPfvTH/PCHP2TRRG5u33HqHlhtGwpX84PPPsP7THvsubi4pFk07Pc7co5cX12yXFSUpeXdzTtIgYe7W96+fc1yucTVJe/evSVbx3g8sN8/UFaJ8+0579684x9E+/iV2ASUhmfPr1B4lk2F0Ug0V1HiSsux2zFMgzRzfKDvRrbbJcY6QogoY1hv1yw3S2xpSSS8n1A6zjprMFlBzHNu3gwnTZFu7Dkej0zTxPb8CU3TzKOyeQyWInUpja4YPWTZZMa2oyxLKu2wxmK0ZMpnFNFHjFZMc5dbZvoiXokxCmAkJ0Fnm4pxHOj7lvPzS4zOvH1zy9XFObu+p2oaEnBqW0IUeawxTkoVW0jqstKc+o7t2Rk3dw8cjkfKqmK9XrM/7emHgb4/sSWKeShDThK3lWIk+YhxIiMOs624aRaMw0AkYYwlA13XMYyjXKGVEh3CDBERWTGC8UoRRRYN/EwwftT7O2vQhUWRmfycK6A1XXsEMsvlksIZCifj3JREQ++cY6KX+b2z7I9HUlRoJ98/HxKhzBRWUVixDOcsakM9h40666XEEQ0T+jEsPQk0lRSJYcJPBm012iqs0WgqsAsgs97UnG3PsUXJqeuEBBwDWY1oK4zD58+fc3PzjpQSL1++5LPvfywKwv0OYzSH/R5nS66vrrClWOnrusJow3q14sMXH1KWIldu2wMoWK42lGWBc46hb3m4u2G3OzKNkfUHZ9T1gpfffIspa24fduxv95Tnms++/wO+/fYlV9dX37n+fiU2AWs110/OUExUznE6Hri7u+HFiw9QRrFYLsidMPpXqzW7+4MYV+A9kx2VcYWlbkrK2QGXckDn9J4YnPzE1Hf4aaIwkkq82+2IIbDdbFg0jZh2pgmtDT5GnC0oC0sYBgIaUyimsac7nXCmoKgalHVSZ+c8NyEDWsPkByEYhwIfJoa2xViLstIgs3aO/5pPqxwDrqwQTYMlSrAewzDfGlyJdRXKylhLGUs3egn+8F7CUJH6MCtwgyEGT9eeaLqWnCNaC8sAJZTZPJ/GrixlWJglQDRFcRo6JzbnYRxnDbvjMW4shMQ0TYTgKayMBDVz4u/ctMxRNoTHDActmR8YlVEkoh/R1hGmieAkVr1wBmc0OXq0UtiywFn7nrBsZ5bhEMIjKZT9fk/rMk1TsWwqSudIM5fBipMAvAc9YAqwRmNyQMWJ5BWehNaJGEvIUiaUTYHKkZzAunlTzJYpZOHSqsz+tENp8NFQNoqQBs6WF+R8ScqBuq64v3+g7zu6ruOv//W/xmYjaLGLi0sCI4fjHUrBOPWUZYV1hn440bYtzaKSJO3oORxOrJYbqmqFNnuaZs3zD16wXK+oqxXj+I5FKaEwddPgqpKLqyvWZ1uePn363evvL3Nx/1kfKScedjecuj1tzqiUGOc56RQmXFlQk3FFwdX1M8gFKY/0bcswTNT1mYy45pgoly1WS569JP9IQGf04/vEn5hF3z10HWVVYZ1jt9uhkVjqfupJObPaFox9T+gnbLWgVIb2eCDHTN1UuFKulzEnUvTiSkN+PXRHuSLH5QyMbKmqata1S8hG3w/kGZLR9/JDXyzXTLOhJyaxy66ahqpZIJFf4ov3UW5Mgjt2+ClIPLsSx93tbUeMEzF62vbA2J+whcHqAjuLflJK+MlTL/S88UhUvPfiGbTWCv03xhnh/Qucd55luKLgU+9NL1ornNHytaeAIpFjRKuMm91/2Si0pIiiMZLGNPYSMpoTVitSkOSfx5uWMRIVZp1lsWhIaSChaJqau/sDx35iDAJUXS7MLI2WVChjNSnKaBWlcM5CDKQwEWbscC40JEMKkg41tCPdeEQlw9XZE1whKU7aluyPR4rSsjvu2GyX9N2RspGg0dDXOOc4tQd++MMf8Ed//PvEGFkuJXD3o48/gqTYH/aUtQShvrt5Q1k2nJ9fiIM1B3b7Owrn5kAWw3q1xpqCaZwoy4aLS8tmvWSxXFAtGs7OLqiXC1Cavqg4Oz+j9wPrzVq0HN/x+NXYBFLk7v4d3anFjxOXFxesN2tevnzJ6CfpqgqCVJpKypGCpKq0XYs1De/evaGuNvNV9YJKFTgtPjfSjP8KkzDvYxAfeggU1mFQtIcj+9PIomlQCvqupW4WGAXt8YCyhVy/+5axPbFwDmcdYfJgBMKFMThnUFn03X7qKZwl5YD38vmnUZRyVVVROEd3OhGCRysY+x4fAs1iQXs8iFV4VpjF2RI8jR7tKsp6QUCRMihbkJTBx0nm9sFTVyWH/R5rFYumIoaRrt1T1AVOZ2xh55JI1HwpJjGrZHHaRaKEhyAde5jVf3MuXpix7I8BGSnNEUZZBEUYLeWClwWlcqQwhuQM2ovn36j8/kpODoQp058OMla1wiLwSahLWgkrUBSgktE3jIEpZApbYLRiGCKhn8COaFfitCJGSThSBnROxOCxxqBiIE4DBsAmsgGVC2FM+IG+y4zTkZAGFs2WZrWhcJb9/uF93HpVVxSFpSgL0bgcdnRdy49+9neIKXJ/f8s/+1/6L7DZbOYQ0oZ3795S1zUff/wJfd9hi5rt2Zbj6YRWGYh0/ZGcoSznuD0yxjQslmuiVyKd355htKDRj6cjU/IUhWW5WFDVlUBtQ+TQ7XGV5euXX3/n+vtTN4HvCB75nwL/eWACfgb88znn3Ywl/2Pgx/Nf/1s553/hT90EYmLyE8vlklD49yz7b755JXbawEyU6TntforGsDmriTHip4nDcc9XX31B1wVSyjTLkmqxFTpwnEhTT5p6gh/FgjmOWJ1FGlwUeO9p2x6t5Kqbk1hmnTVyTc1Qu4I09bSnI3GcsLUlToGH/R7jCqrFEqfAD73EcQX5HFZDnEainzBaWPBd2+MuL5mGgXHo3p+qMUTRFlgndB8lDjdbalmoWTH6SL1y1IsVIWb6qaUqK/wkEdYhRhkvrRYYa4lxYrmqCXkO/0gBUhBst5ZGZ9e2rH0Qnl7W+JBmX4QhhEgIEbLALKdpks8z9wNExl0xDe17pp+zFp0kSyAGL6VFThTWMCHjWqOYY93yvFFLxt/Qd1QzHsw4y5iSlE1JqFLWKLw1c0KPJSShL1unUNYxBNC9p6wjdeEIQShKMWXUvJl5MpNWuCIK7ThHklGQA9aUOKcprKEoGrQp2WwvWSwrUIp0SHRDx+XVGf14YrFczXZng/eRxWLFelPw+tUr3rx5xRdf/IzVaoH3I5vtmmEYGIZeDoHCcXd3y8X1louLC5bLFUpn3r57DRmapqYoFkxe
tAOH/UgMam46LiVkVEUedncYp2nqEmvgcDxQNzVTnLClIirPw+H2z78JIMEj/yvgf/9LH/s3gb+Rcw5Kqf8J8DeQzAGAn+Wc/9E/w+u+f/jgGfqRp1fPiFPg7eu3QglKmrpZoLKcWn5KfPP2FXVRUzdPgExZFVSVQ+nMfn/Py5dfcXV9zmJZoHUGLzkBaeqJs8pumkZcXc45dcLTV9qwqBvKQrTopXP0fcfYdzTLFVZnuuOeh8OJqm5QtWHsOw77HVWzpF4sIGe69iTACxKlE3ru1Hfk6HHGMA0D09BhFcQwzXmCkRhlAflxFNZHznN0uPQHlBI9QWZEKYuxmbYfOI5HjHG0XcdysSCmzOjH2eFnaYcTKRaUVSVBLDlCCqSA9EmiZxxk8ymdhGKG4LH2ETv0i4fIIBRm9r6P40RZiloz+lFwZ1rGfvqx+ThPG9QMQU3By41svooLwUkUjBnpt0CicCWucGijhKPoZyKSEvCLMXIbiBn600BROGwQLcXgI6OPWGOJSW5pIUpPQuVZRp4CVity0MSc0EYRQoVSDWVhWSxqmsWKGMXoFBkFZLuuqOpSyrfdwGq15XQ6Yo2cvqvlit/6rc+4vr7k+snFHLB7xhdffE5VX/DB82dY4xjHga5ref36NfvTPU+ePGW9Xs4/b+jajhAnlsvFHMRraI8dbTsSo+f+wbCOS4apZ5h6zuutoOsUfPXVDTmvKWrpa1gHq7O/AF7sTwoeyTn/33/pt38L+C//Gdb6dz5SysL7x/Kw26GVxY+Jzeqcqm4YfWB7fiFnhr9j2dQoJHZs0ZQoIut1zbLZ0LZHDrt74gdPoFBCqwmTnM7jSD/0lEVJ4Ur6rqesG1xRU9RBuG/aoFVm6Foe7m6pqoplsyB74e0RJggWUikLnkxdFlLTTqKmc1Z4+kbPqURhkrk5MHQt1shJE0JAk98bXyDT9x0uyYYQ4wwJVXr+vgu5JwMow+nUky3z+E1y9cqqpG2PHI9HjJbXadsTS2Mooif6kewMRDBzk6099QxtR1MuiFHSeh4NTCllwXxnGa/qWevetj1917NarbHWYWxBTP08DXFkm/CDf7+PpJTe3wqCl3SiEDwpR1RUpCjKvTybsgDKVAoT0WhI4noUGbg0ex/9/ZBFoGQzSkd89ILuMkIgHn1AOY1ByaaTE4FEqkqyE+SSxMmLvLlpahaLmqoSeAgqoPSINpb1VrQT+/2BoixFQRlgtbzkdDqSY8F2u6EsHd/77GO67kjdlGijiNGzWq05217w1Vdfc3Nzw2q9xsdhFoMNyARiRVE6Xr96RUye5WKJUopmIdSsySeO7QPaRrquJcRRbDFKpj8XZ0v2pz1DstR1hS02vPjoCd/1+IfRE/hvIpmEj49PlVJ/FzgA//2c8//rT/pLv5w7sFqVlK6i3bcM3UhTNmxW2/e18OFwS5gSm7Mzmo+WrJc1o98T40F030mxWTds11IzLRc1zmmUnt8wZIxWxPkUW202kCJFWbPebMXfPgw4V2CUZuhOHPZ7pnFgUZdoEmN7RKdEZTU6efw4kKNn0Qh1yE+jLFoNGivX3Nmqm9J8KoVInLMCyJGh74hzolHOMunQRU3KmTBN+HHAKi03hRC4f3igrGriLH89HA6cP9mI27ERkElVltJraDuMztSVo+9P2KHHzviyXJSPgUrSRY+R0/FAUy/BOIyW8Zr3UmJopeVKHRPOzZbnlMgwqwc12lhpkM6mJD0TiGKU6UNKSUw8MZBinGOxZJoiCzuDSuQsi0UHcfulGGAGo1prsUZ6BBIuyvxxub2ASJN9zvTDhNWaReUIMTHlSFmXOGPmRKJImITzoLQmkzDWUFQF9aLGzJ4IawxaZ4oSRt+jUkF3PHF7e0dTL3m42zNNkefPn2JUxXq9YZpGhqGjWZxRlgXkxGIhvg3rLMMoCdoXF+estktCmlgsFvS91P9F6aiqQhK5rSbnyOnUobWmqBRVI5mG6ES9qLCFfM/2+wFFYr1a8LC/ozudaDvFqT3w2Wff/84F/BfaBJRS/z0gAP+H+UOvgY9yzndKqb8G/J+VUr+Vc/6PQM9/OXfg448u83qxYr/bsV1tqMuGuqhJSZJ65QTSLKqa9WpFVWj6MdG2b1A5cH6+ZbNqIAXOzy65vrqgsIYcx/nNIUx54yzNYkHZNPTHE8vVhnq1wYc9qIBWWvoDp1ZO+XlSkfxEO98aVBT8dvKawmmUtaTo5dr5SAieNx2LiG+C9xDiLNyR03/oWvb39+8lwM46utORerUlTJqhayFGmmVDd+oFwHp3x7PnL8QTESLH45HLqxWkQOEcJAgps16uGIaOcRhYLrY4J9LloZNI8LxYCEdv3gS0UpyOR5rFkWqxQmk7L16JX3eF3DRSEit11w94H6ibRp6XEYqvKYghEVPAaKEED2NgnOT7E2OShixy05jfRGIMUlICxST0Xx2jZE0gZYAiY53BakNROIwdiUm8I0VZEIInhow1DgonQqgYsbahLAx56qUXoUXIZbQSd2MKKOR7UDc1q/Vqpl2PBN9RV4axH6lTjfc94zAw9J797oHkIcVM6SqMslSuYbM4Y5jumfzEz3/+Ff1w4ur6gmZRi5MvJ06nI+vNkqqqhVeR/ftma92UgDgg15vV+/5L3/e4wlCWDusKbNK0fUtV1WzPztHGEPxE156oSxlx7g5HoTntHjgc9v/wNwGl1H8DaRj+0zNhmJzzCIzzr/+OUupnwA+Bv/0Peq2yLLm+vOTV8BJnNOtlQ31xwf5wROvMclGDsZxfiFa+7/YUTnN+tiRnz/l2hcqe06Hn8uySRVNjlAhfQgyzOETSZaqqBjQhZlbrFVk7QgJjC3KKtMcjh/2e4+GIzpGpLDjtd7IZpYiPkapqMLp6f53NMaBmlLU10skW+KYR9VwITGli6EVS66eRw37H3d0NdVUzDT26htPxwPZKkoCnYaAwGqsVXdtSVzV+GslRmpZdGEXVNnT0Byfe+KQYe2k6xRBojwdSzEKZcZbJB8Z+JPo4u32lMVoWjnYYOJ2OoA2uFI5/mJ1+rijn0aAk5EzTRIoSIx9CxBQaTIEyGcxj2GcUaGeSTeIx9QctPweVHzcC9dgaFDVnzoiMR5DamSw/wyxMwTzX77IpQVJI/Hphyb2MVa1WRCOx8tZaVqsVoVeQvVB8QUbIWp5jjCDIy6pkuVqRyJzaFvKEwjCmDu4kfNT7iMbObk84225FbhwjVmuGrqNYFFRlwRdf/JS7+3cUxa9zfiFoudOplXyJQs0lIHPQTBAUmrEYq+aNQM9sDUGrLxY1TSM2dB88p7ZHa4dzlfRegmYaIjp6rBLCdIrQ1IIY+4e6CSil/nPAfxf4J3PO3S99/Aq4zzlHpdT3kGTiL/7U1wNWi4rry3PaQ4sfW0qr0AS26xqM4ez8guVqxdANHMaOmDvO1g1KZ5rKMvQncjBslgucUu+7yVpLLaiyhF5aJ6e9NhZbNfgpkNG40jEed4zjRNue2O/uqZxlWZccvNSopxBIOdOUBVpFTscdWUvWYFE1kBUqMwt/EjlmyR4
Mv7D+QsJPowA1TkectVIjG83pdGDsO/TkGfsWW9eMw8Bht8OcKaw2dKeW1WYrNmBrGdsTpxlJltGMfU/pClQWfNmp7VmtFxTGoaJnHCamcRIxjk7zxljSDQPTOND1Jyqx58z5AyLXfWxU+jDNBhzJAhiniaYsRTAVEzqJ/TpOkcRMHy5KovcYN4m3P3r8TANGyYLn0eGnBK2mlBLtBTM5FxklJ52lJDAaY8EgFKBm0VB2LYMXZJqdpw/DMKA2Ehw7nnbiOCwsWs1RZUjfJCOj0Cl4ptPIqW1pKmksrlbbWanqCT5SOkNdNjhjWdQV0+CZxlbi6+JACGrOPdCkFDm1J84vzgjBM44DZVkxixkJ/Sh1DdID6vuesnIMQ0/fd/K1WEtRVNR1SVkWtN2A1o6rqw1XV0+x2rK7eyBHS2nFju1zoqk2vH37lu3HV5xt/wKKwe8IHvkbQAn8m7N67HEU+E8A/yOllPhW4V/IOd//aZ8jBI8mcrZdMhz3tKcHTod7yWc/PwNtWDQaZyNjHlnWjtFrRj9SFGIXzkHx5OqaJ1eX6CxX+EeMts4JooSDaiWhja4oQTuGoUMZkaX2KYk3YFbRKWfJKc4JSI5+EOmyVpkYxGde1QvKqnqf42e0JoZImCbGYWQcRnHgTQN+mjBzGq6fRhEJaTkLw+SJ3jN2LT5mpnGgsobDbqI9HigLJ5LTw46qket8WTjC0DEpRagamFOHY5CJg/gIujliq8BqS5g8QzeQZ+mucSLEsUZi3adxmE9tmQDIzUkWvOQUhnlaIbes2Efq1VpCTkxCm0g2UdJ+zAzw1JIiFYKnqhtyGPEpzSj0SMx5bnbORGCtZVFGCaARGCxzQ1AYBUb3on6cPQZVVdJUnmlqZ4SYhdlBeNjvub7YYo0hBjWj1cQB+cuPYRzZ7Q8oK0yCwilSNmzWl/icicHSticWdUPXdiy2C0pnCGPH8bgnliXl2Tn7/UjOiY8+/oj1ZsE09RyPUhE/PDyIH8RYjC0oywZXVCidObUiLTfhF3zJ1WqDuMoNXTfIzyIbnHOcnV1zfv6MafSURSQyop2DqSUlzbPrp+Ro2awuWTbbP/8m8B3BI//b73juvw7863/aa/5/P2IMlIVi93AihA5tHRow2nLY3xBz5ni6paobjCqY88IZ+xayRFo39RnPnl5TlcX7K7hySmS686I2xs4/eIUtJBR0nLzUzErNM3HpDZROwBWPnWg/Sh3sCokhH8eOtj3MufNSYxZlIfLP0TP0A33bM44D0Q/k5BmHgbquMVrhp2m+CMtNKKVAWRRM48DheCLnzGa5oG9PMxvAo3Km63revX3L9uJSFjwTOtfkFEkxYpQEn1or3xfvI23bC+hys4U54YgUgYSNwqOXTruZQSuejCUlKIvqfa1urSVEqdmFhx/F1JQho1HazB1+jXEFZdWgUiY7R/ATwUvEm+8LlBJ1pStBBbk9SFiJiIRCEik0VsxSSWVUzGQjqb1ZiRTZaDOLlixV4Wa/QUI7N98ooO1a0nYl74VgZriqnssAmQ5o2Y3pxxHJqJpVkcA0JYpqgdU9MfQSZosEuYxjyzS2pDQwTRNv3xzos2Oz3XB5ecHV1Rkvv/2acRxo25bdbi9BIkaayU2znhObJDEqzT2AcRQ9hrOOgPRT3rx+y3a7pVlu6IaR02lks04Er3BuwdQGgteslhesTMYta37j1y7IObO7+xWHiiil6YeRn/3sZ5Qz0rmuhCP/8HBPzBltrQSCJkPwgbpRKGVQyuJsyXazpSpLxqEnaMFgF64W0mySOlVJ7hSmKNHOiclIKaxzhHF4H6IZZhRVURbEGDBasGcZ0cT7ccCnxNC3syFpRKvVTAQ6Mg4TfhwZp5EQJikDcmSaRsqiIClN155EMzAOhDDNyUmWaRw4Hg4YrQl+oj3JDy9FAWaG4Lm5ectqs2YaB4zy0mFPCT8FtCnIOUmAZbN4r58fhxE2whKIMRCUzO+ztP/nRGBD9p7TqcVHi9GGZd0wOUuYmXzWCJxDxpZKLMlz+SCBHRqwuNJSOofVhjT3L2KcIaeuQNsR60qUdqAFMAuPbkVJUooRnK2kN5ATOmdCUKQs//aUE8YVAlpBUVb+F9OGlMjz1EJpiS9zxhDFNoHWBmUU2sz1t3lMNJZRa0ZjixLrCo6nDucVMcFysZaxIZovv/yS87M1WsnNwZiCt29fk4oFVe24vR1YLBe4ouD27p7b23eSRGysHDg+cjzKdGGxqIT/aOF0PNF3LUVRMAzCEYRMCInDsUOZWjIZkqbrBmEXaosPmZwUy/UZWQWO/Ym6qRj6nof7u+9cf78im4Dhjz9/w0Ob+fSjpxSrFcaJ+cXVkcqIJddPQRpGOoOpKVyNdQ7nVji3IsbEfn+HNY7tZgusyFQok1CFJcWJ4EdyVeHJnPoTdVOQo8Rfl84xWUvbdhhXkrSjbTvqwjGOIjcdYsvYtUxqZJgDSAdXcLY5Z2qP7O7uRXATPSlPjL5n7DsMoJIkH03jwHja4/ueqashJjHjjD0pZHRI+Ozxo1iMlVKibygd6hgZ+yNjd6AqFEMb6UfPIvOehmSToygM69WS4/EgJ1tKjENHdIayMigjHXWfAkVZSjkQPSZHxtOew1iwXa/oTwdMEJhJ7QrJO1Tg6oacPaWGFD02KbR2RFuBchirMWR8NujksRF5TszoekAFsKqA4NFqwGgRjWZFWAAAvMtJREFUEBHTbDiSsix7O8NQHhd3FgIT+X2WYFWVGJdYeYW2GoMhKEVW4kQNKhOUxWnD6DOLSth+2WayzmijhLWYNU3RgIG2n0A7grIMU4fvdhitubq65t2bI/008M23r8hKs1mueXdzoq4qji30xy9pu3dkDNuzKy6unnFoPYmG88unZFXg48D+sCNGT5wCYTRstmucMdzsdgxdix97TscdRVlS1zXb8zMmr1Gm4Gx7Tnvqebh7y9XVE6rGcXa1pu96cinuMDMNdP2RaRxYbv5yosn/oT1iglPn+e3f+Sucna1ZLWva9kiOnvOLK/zkaU8dp3Riu91QVIXEX48Tw5DwY0uOFoVl1SwxNWidxEOdFGiN0nMCD6CMpm9PTGPPpqlo24PIXGNkv9tR1TUpWtpOLKpGRSYvmC8/DgzbkWin947DHCNhHNjtj5wOB4qyEHKOhr4/iWtRGzTyOfzYk7xIiYfu9H60NvQ9pVPUZckYPH3XczqdcEUhuYuLhZhvspQjWmemkBimIBiymQyU51uHcwZrDFZrytkKnVVgnGamYEqkkDDz/N17jx9H4jTRtZ7KWU5xJFpNs1gQpxFtK5TWFM4Ro6bQcR6JSniqKzUWuYYrMm4u3QofKHxkHEds2VCETFaWNHSgvdB+ldCgBIsuDsfgg2gVtCIruRENcYAsp3eIMi0ojGO1cCxXC+73J1xTE7JiSsIayEYkxhgHylJUNV57spZSwGiZ5tRlTbmoQItxKw4R48DHkWM74EqHskJ9StpgXUXZrHBFx+HUsVxecrr/iq+/eUvdbHnYdxzbQEyGjz75PpBwRYXWMIwDOXjCOM
qo02lCKKiKkuNxT9ueKEpH17d0nePqyWeElFk0DWXp2O8eqMqKcTwwecPmbMsQWu6ONyyXBQ+7t6QUZPS5Wn3n+vuV2AScNfzVv/LbXF9fUFcFfXdkvapZLi5w1kmDZLPkeGxYbTY0qzWvv33FNz//mnEYWC+WHI5HVIbiqWO1WqG1EViGGlFGkXOQUyeKmWU4HTAkDIk4Sjrw4/X78vycu9sbur6ldA4/TXgfCTnTnTp8iKAl7zCnSFUWDH3Pzbu3TJMn1DXDNFAUlqHvJAA86/n5j+IZQXudTieqekHMSpJ1cayXCyJwPJ44HVvKOqKVpigqaZwp9b5ulNeRuv8x4EMSeiUFSBuF0jJj11o60MMwvE+8fXx+VUlK0DRNM2h05HTYE02mN1BWlTyvURRVjXaWnAusiriiwboStMbMjb0YA8I0XEKcyGEix8DQHjHWUVUVOUsjUmmN1ZasFRFFehQUzX4K9X6MmAnzGFFEjY9KSin1ikJzfXHB/cP+vX15GHquzs8BxcNuz7qsiKj3gBY1Jy6puZRJCaqqpokNu8MtykSqpiBnTd9N/PzLr2maBSkk1qsNMWYeHnZcXV3hvWe9XIF7xfH4JVVZc+omPv/8Z5xfXvP06RO+/OJnGJOJfqDrOg7TJCxGCu4f9mhrWCwbYlL4AInIMPRMfk9RXYJyhDhS1+c8fXrJOE2MM5uznCynbsfxeKQfC969fYNSzKGow3euv1+JTaAoHL/7O7/FH/3x78Nmyd3dW56/eEZZG775+iucK7i+fkLVFGhbMYbA4D13ux1+GFmv1oBiGCe6bpCFY0sWuca5jLJKtOlhJMdJXIXTSF3XqCgfN7agMJr1oibHwNQPFEZEJIeTWJZTDByOLT4kUh7xc7OrdI6uPfFweytQkTSHQiaJ73JlPfvemcdRAssYhoGQMtoUkvs3TigmptIzTRPDMAjAZJDQi+3ZGdYKvy8EOVW1dsSQaU8dy9WS9MjZR6YuOQdSMqQYKIxj7AfaGEVlaMz7jSOnmcKrhCFQGM80nEjJE6xwBoZxxMbAsllgy4qUJpzNaF2gbTkvJtEC6JnA5KwlBU0ZGkKYcOUMa43ydWgjRiBNJgUZ6+WsZ1+HbFpaaYkNJ0uzkEfNAcBMHRKDNefbNeebDfthJERJW95st6zWGw53tyJaSjCGSNGUKONQ2pGUxsdM2/WE28ip2xFijy0y33z9lnESIMzQ96yWK6w2VFVDzvCw2xOmwHK5JOVMUTQ8ffqcqtngDj3923v8nG+YUuLh4YGhP3I8nSiMZbna8LDb8fbmlma5YNkuaXuPsRWH44FT280lZuL8bEF72vNwLylQ0+Q5tSeMNYyhpR0OYBJZKxaLZpaew/3dw3euv1+JTSDGwO3bb9ndvuPm7ddstktCGGhbz8Puls3mjNF3VE2DchWHuyOnvmecMVZTCBhtcCbTdi1hmiBmCtPglAa0OOdCmJFYgdoVNM7h+440iWnHaGjKip/97GekGLm4uGD38MDD3Q7vA+M4sN+fOLQD6JaxH+i7juA993d33N/eUTcN1ogKzpMY+47SFjDHVMk1WTj/0zgJ6TYmwoxFSynSdx3dOMwx545hHN8LaZyTcaYkz3oW1QZQDIPn8rIC4HQ6MQw9SjGbgR7xX3LFlumA3EiqqoKYSD7QLBoK6xi1YdkUnHYt09CCMbTHI9kr7PqS87LBlBUhWlypEQNDgSlLyBL4ZpwEfViTCDmIrBg9u//cDIKR09wYi8qJmD1CAp5DOo0h+tk/ATO1SPQfKUvHXNkCY6xszFFciddXF3TfvsEUDrTlcDjy/U8+obY/5Obl12StMK4GraUkUZaEYQqJ/anFjh0P+7f40LJeV9y8u+HYDhSuFHNY7imsIwbIIaGN5ptvX1K6grqpObWvuLx6grUlOY8smgXn5+d88803DMOAsyKEGoaeIUmg6263Y5wmqsWKUz+hdIkra6qo8dGyPdtyeX6Js4bd/sT93S2n7kTXtcQsoqqLqwtBrBmNK0sWC4m9X61W74Ns/qTHr8QmELzn519+Qekc7969BAIxjLjSslwv0U7zsy+/4Pr6Kc3qSnjyxnJ5/QSrNHXdkH3gcDyhsuJie4bWRubf+jGkUtJnc4agIotmidWw38kCVyHLbL098frlNzx/8SEpRO5uHzgeO8hwPPW03cTD7sRy4VEZ/OjpTi13N7fsHh7QWlEU9n2ybHc8sqgagYDMFKQ8a+nznLsdQiRlkbNqpZmmaU4jVnNabZxlpQFlZBGZOYuQecg4jhPOlXLt3z8wjgNVJQacGDN5jmIrC/derBSCoM6zjvT9hNaKqpJcg8opep0Y/cgwZt68eY1qBi4+/iFlvUCXFQSHrRwxJMCBdSKSmgNfReCfyPSkDKOfpItfSsdaz4tdRmOPWgHe5wIopWF+g/9CsCTqOdk4RSYs7MdIGCeO+xZnC5q6ItkCHzPvbt7x8ttX/NYPPsMpaPf3jDFQZotCY5TEzkc0/TBRoOm6jtvbb+lOFcZVXJ4/xRrDcrmkazt2u3uqqublt6+4vLxAG8OhPXE4HbEmcjz0ZDWx350YvLASP//8c0KY+PijD2Y14IFh8Nzc7WkWCyEOlYuZKGXZH3uMKbi4es56vWa3O9KfdujZT6CV4uXLr1mdbdAGjq0R5yWG+4d3pD5yf3/Ps2fPxAH6HY9fiU0gZwFDTNPA+dkFx+OenCLr7ZrNmUQ/7/dHtudX3L96xdubPdM4cnFxQQ6Rdn8gjh5i5GJzxuXlFecXF5JDaAvBccVAzpqsI1YZbAFhaAlJoUwhXXg/st/v0UrhtOXVy1e8e/OWySdQctomLLt9S1NbrHXkJOm4h92OoesYmwZnRXijNKTwGOCZ6LsWQyJM/v2JFoOc/NoWsyNNaDwpZVJMSFKQpBQPw4QtHFobtHaQ1Xvu4ORbYspUzlGWJcPUE1JAGcXgByY/Ql5QVzV920lktVLkZURZyVvsTie5YecMccJp6ZlMfuLduzesn1Si1agaVFlRZOkDqDwBBWBBxTkkZMaYzZ6KnMEHCWwxVkZ6xkrirvcTOc1KQWRer7XIrzNZCEEznkxeL8nIN4siE0QjkaZJJiwxUljNfduCceSs+NFPfozTio+eXGEU7Pf3qLlJiHagHcpWoDWHw4GURBIcQ4dWhqdXz0gZzrYbXvlX+ClQVTVVXcu/RSmGYWCz2bBdL8UvkTNNs6R72PHy5Su8H5mmAa0SZSHK1YvLK1AW7wO73Ymu9zQr0Q5IYIhmvdlwe3fAMnK+rjEp85Mf/wxbGPp+ZLnNDMOEv71jtVlSVBX9qWVdNXRDz/1uN2+2f/LjV2ITiDFBNOzuj7z4+BnXV0/IOnI47vnxjz6XmWpKjIPnyy++4eHQ8eL5c/q+59U3L2n3B5zS/OB7n/H8+XOxchaC/UI76QhjyFmLp1wVKBI+j1AscMrRngZC39H3PZvNlnEc+dnPvmC/O2Kc0Hz7KaBtwf7UczVW79OH2rana3v85Bn7AWfN+7lvYQyLpqYfRlnsj2/keUY/DSPjlKgaJ
dQg1fM4EJum6X0K0jRNjOMg/IpZryDec0ddLzgej+QsmQFVXRGSl0ZeXTCeBGSqzi8ktXc2DFVVhZ8mCe1ImX7sKJzDKE3wI0ZBWVjiOND1HRdFITHk2gLSUMs5EZKfQ1Ek0FQbQ06BGLMwFufGnZpFOTkjGHRjKArRduQkwFiVE0R5nTxzG1OW+TePtyfke6C1Ee2ED7NwSLNeLNgPwoi8v32g2ZxRVCU6J/7+3/89hu99wm//2g/ZnK25P+1kk8AQMcSkyBGKqmF71jAO93SnOzabS66vnvDmzVtubm55+fIlh8OJruuoqppu6FmtlixWC4qqoGt7Ntsz0TBUNbZs+PrlN1xcXAKRL7/4nKZyNM2KzeaMgKFte+7fvCMfe566hnHyLJZbDoeWn/z0K4x1/PXf+SHPnj5hmAZ8yHz51Rcklbm6/ICfffFTtM0cTy2nvmVZ11z/2q/zwQcfAPDm9bvvXH+/EptATnDz9p7Lqyf4Ud6MVVPSd4H72z2nTvzWd7d/h33bc3H9jN/8jd9gd3eP70dqW5BDZLPZUjcNoDDWgC2IyiFXVYXSpXStlUI5w+l+hyqWxNRx8pE8TuwOe55cPeWnP/mc07EVppxRZKPwIRHDhDu29INGK1Ec9p3EbfspMPSDqBYLJw7GPM+2+56YAsMwoBVEL5jvyXu0lVhy7wN1rTgejlRNQ9d1NIsF09DP7r2ePOvqq7p+3x+ATFVV3D88cHG5JaVEVVa03Ym6KUXxOE2y0BQzNizT9y3v3olyMKXE8Xicyb4WZ+R7lAuD22y4e/lWSE4xyelrFSkptNLYohIRzjy5eDQLiYLQSnNzGimKArtYkocOax2D92TynBwtI8UphveEIm0N1kmJEWOYoaWPbkPIOqOsxljH5AOT90JYSiLMX6+XDGGCERZNTblecf/wwKvXr/nt3/4N7KLi7c1OAGdKE5URXLmSxtp6cwZ5JOXE11//nHHyHI8HtIbNZsV+v5fb6/kZqERZFiyXC25e7XjYfY1xBR+8+FBky9fXPHv2DO8Hbt69RSE9p6pecr87MU2BJ8+e0bY9h2OLtQXHYw/a8MNf+02qqqYsDPd3J07tEescl5fPqBc1YYLvffprxOy5vb8FHJtVw+GwZ7lc4VzF6dR91/L71dgEpsljTMnZ5pKvvvmC+90tn37vY5p6RVUcuTx/yugDn3/xBXXT8OLpE968+pY0BZ49ucJ98IzCWDbLleDKrURtKWtRM51XLDZAiHPCTkAVK0qnwTrK1ZYcOzCWCDNU087wTE2M4EMmkvAhMU6BwmqmMTIM0yz2mRiHiaEfqapivinIzL7vW9kQciJF0eGTFCkmlMrkmHmMzp6mCVsUpPl0ezSRkDNd180TAsPFxQXWNeScqRc1RVlQNQ1106A0HNsDrihYzHHrXdtSGEsKQRqDwTPERHc8CdK7HzjudixXKxGbaJHXqtnRZmaUt0iDjfx71fy15HnyMSPHUxKUd5jmOO8QKZ0lBEn7iSmJVHgGjL13GeZZSK0eeznSL0gx8VjYSo+E+VYgUuM8W7ZjSvJ3kkiJVSENvxijwF+IvH33hvNXaz75tR9SNFu+fbcjROYysaKuFafTO1IZOb+45ObdLYfDXtiLwXNxecYnH3/E/cMdb968oSiFsxhTIKvM8+cfst8feXNzw9dff0M79Cxz5nDY0zQLPv3ke3z15RfkpLi7fUAXFWfnF+8Pk6KsaZoFD7sDu8OBs7MLLi4u+PlP/hidAuvNhn5/QJnMB8+vKUrLGEZWixU5Wy7Pn+IcTNMDX/z8K8hGRrjf8fiV2ASWyyWffvoZ6ITWTuTA2nK2EdzSD37wa7RdT/TSdb4829KeTkLtzYbCGrarFcVM+oWZMGsMypXzlU+T8yxi0Zq+3aPLJbbU+JSo1meo2FI0C6YQsUVJUdaMk/AOfQj4EMjGMIXI4dizWS2Ykse2AynBNMmG4FzPer3EGMsUZYJwOh5nqKUlh8cTTfz6OQRMeMwDgJwU3kdgvjqnjPeBqpYwj8KV5Ayr1ZqyWrLbPbBYLFgsFjjn0EZT1xWXV5d47ylcSZgiXdejUUzj+F6IA4FT27JcLMg5czqdWCwWpOxROcnGqRRlVVEUhchtlfql67pkJv7CijPbgecFG2aPREoBZzTTPM0J7/0CzDLgR7CITFZU1u+buzE9Eo1FlyGSYbEfy0akZ0l1JGfRFIQovoJ6uWaYRvkeG8WirJmGji+/+hJVOJ5/8n2urp/wcJoYpoBrtKDJiprd4Q1dG7BaUxTSdFuvzhmnieunl6w2C4yVcsVYTUiaP/7jP+Q3PvkNmmbBejVgy4qPP/se++OBv//3fp/vffYpn336Pdpjz2azpp88p37km69fcjq1bLdnPHv2jL4fybND8tSe8MFzfnmNyYmqLnn97i05J7bbI9uzDU2zYvfwwO3tPT5MZCbW28zD/QMpaVbLs+9cf9/dLfj/46OuaxaLJYfDnmfPnvJX/spf5cMXHzKNI8t6QeUKFlXNJx9+yKIsOdzecn12xvPrJzSlE/FPfyKEiaJ0xOwZp56YBWARkRoUbaSOtY4QwVU1WRuysRJgWlcszs6JaExRUS+WGGtkEcb0mK7NOE3s9kdigilEjqeOlBWg8V42gpQQFFiGaRrfh3hKglGcDfYKlTMxRKbJz7cCNXe/4zwnT+89/ORMWZacnZ1LHkJZcXl5gTGGzWbN9mzzHg6aMyyXa6bJM46B4AND13PY7+cE5ISfJkIIdKcT4zD8wjcR5Go9TJOo7ICmaXC2kJgwZebZvHqv9f8FC4BfGHTsY4hImoM9pE8hsJAZKxd/sQHkeTygtJlTgfScezDbmecJiagIE3F2fYYQRDCkDBkJGo0xgxYO4Xq1wphZhm4tq4VEyf3oR3/ET3/yU5xxbLcXNMsNi9WWarGmbFbErOknybTIecTZzPZsQVUb9rtbdrtbzi/OSDnwh3/4+6Qc+bXf/DWsk+TnmBKPROZpnPj+97/Ps6fPOT+74vrqGcHD+fkVVVVx2B9Yrzdst2eklLi6vuB73/uE8/MNOXumqeODF09JGv7gj/+Ydhg4v7wUtoVxPH3yAU25ZH934O//7d/n9Tev6fqWPK8vcSD+yY9fiZsACs6vznj38Iq7hxvqpsTHDu8H6qri3ZtvSSlzthHgZ4yey80G5xxjWcgbKCWMURgr3WUfAy5OmDinDWHFOGK0GJKMpjAF0+Sx1lDUFd2xYHNxye3wBu1KirqWhN5JFpU2mqQUUwi0/SS1ZMocj728Ca3cOLyP+ClSljBOssAXTSMnog9o48hh7oBrjfcSuhp8mCcCQg3WWr+P8ypLMVSVZcnFxQVn52cYa9lsl1SVY3u25uxsS1k6iqKQhagtOYmeoOsGTE4EL8rBnMUibYym6+SUrKqaGDzH4x5bi/MPL4IfkS4njHPwS53mDP+hW8B8qZ9bm2DKknyUzIGpHxj7nhjTHI8uTb2UHqPOkDJAScmg1BxoojRaQVLSg8hq/qxZTESkRM4a40qJIwsCZiFL07msK6zVTEPP
OHTUy5qqamgnz+tX3xJ0w/Pv/xZl1dCPntWqYrWoWS0NftyBP7JeFWgFPrRcXK558/ZbvvjySz777DOaRclqu6RqSparJeEh4mNgv99juo6ykTShq6snrFYb/BQ4O7skRvjqy6/ZHQ8sFyv6ruNl1/LJJ5+yXn1AjJ7tZkUGNtslGNgdd0Q8f+Wv/i5lWfLq5Us+cM949/YdVxdXbJZbFtWCJ1dP0bqfLdENTbP4zuX3K7EJ5Bypl456UfB7f/AzUvKcn2/46MULysIw9EecK1ktVmyX1xROxmkmJzaLBcM4MI4j7UmirK6vr4QIq0AT0Vnm8MpElLb4tiMnTwSmoad0wqqLaKrlClPtcZWw/au6BjVhUsQmK2aUFPABQhTfw/HUiX9dCVdv8oFj22GdeMDdybLeCI14nPxcP0pJoJWBLKdvjKICDCGQFCIKmrvoZVlKc28WDG23WxkF+R5dCBTFlQUxJ+rlgrIsscbiqlvCsSXG+fofZ5Q5wGOnXUGYPNkV5Bg5HY4si817bULOE0rV+CBR62hFSpnHIkA6/7OcN2WhGec4k42ThHnEhB97/DR/fVk2Oh/EG6BmV+IjUOQxWeiXbxdaC84NICE8gJRmcAsSI6/0Y5q0gTkhqaxLqjkK3I89odTYpmS7XtJOitevX3EMluwacJanT8+5PK95/mzN5eWaUrWY3PHwcMup7TA20/UHmkXBqzcv2W63vHjxnKquadsTq3LDdnHG3e6Bh/2e+4cHUJrPP/+cjz76hO36jLP1Ob/1mx/w7es3lEXBxfk5P/7pj9ls1nzwwVOOxz13dzcslgtRjvoBu6q5fHoONqEtDGPHR598yNj1/N7f+w/43d/9XS7Pz/gn/5P/BLoIfPDDhq++/Jqvf/6GYYa3/kmPP7UcUEr975RS75RSf/BLH/sfKqW+VUr9vfm/f+aX/uxvKKU+V0r9WCn1n/2zbALjNPLv/Z2/xfH0QL0ouH5yzvWTc4zNWAtFYWgqiyIw9R1pmkjeU5cVdVnMoMwj+92O/e5BOvBGY8xsJCIiQLgg19KxnxNvRBGndWLyI2OMYC2mLKmXK1abNevtGYvlUjrbzkpzDAgxcWp7hinQj5Nc55PCh8g0Rdq2Y5w8IUQOhwPjOEKWLjcpv+9yP8pjH7MHpsnPC1b8BSFI80uhiQkWiyXOlZRVjXMFx8Neuv0qMYwD+8NBTtmcKcpKVGtIB/0xp2EaR9S8hMdxfN9089NEipFpHBkmT5g1+uPk50addOulDPiF+kQKA3k99b40SBLkMfQItFRm+CGIJHqaJibvJZB0XuTSf5CT/xdXA2YMmJqv/LL4Hz/no78ghZltoMwM9CxRRqS1j3yFwhmMUaicCDMifbVcUlUVX//853z++c8Yhombuwf+5r/zt/jDP/4xMSmqugIVQAU26wXBD1xenVFVBXVTcHa+5dkHz+RmOo18/fXP8X7i2bOnfPbZZ7x48YIf/vAHXF8/ZblYU7iKtu1ZLtZ8/3s/4MMXL1gsGp5/8AEvXrzgdDoyDD3Pnz9jvWpwTrHb3fLy1c/xaWB7vsSnkaKy/Pqv/4D1eom1mvZ44urykt/44Q8E7R4jm+2GzWbD5eXld66/P2/uAMD/Muf8P/vlDyilfhP454DfAj4A/h9KqR/mnL87AwkJwPgP/u7f5td+7ftsz1acn63ZrJcYBcumgZQx2hJDYr97oFUHnj1/zqIueXi0XXpxzZVlwfF0oDpUXLiNbAA5CENjvl6aGVfdjZNgplWNHwf6aSKGiLGOxWpJgcH3EkJ66jp08OgsnWvvvQR/FrUIe+a6Vmb96T0UwljLONNlK+fmhplgvOfv2i9O1F/aDNKcYMzkhT5sIkVR8OzZB5ydneGsvOGGsaeuK7Eb9x3H44miKLi7u2e1EqOQMZacFZOfsEqabHVdo5TieDzSNA3ljM9OKWGMoet76lIyFkMMOK0lQm2Gb+SZMvy+FMjp/dem57o+5cDUt5IZ4Uf8NEm/YZLegPd+nirI94EZAvLLpYEx4oSckABUNesG5PmzijAnYk4Er8lqBnFGMHNYawxyK1EKiqIQUKnR2HkDXi2XLCbLsl7z/MWH3D28ZZoCr1+/4adLww8/OeNsYbi8OGO13vD67TuefvCUq+trXFHS9QMpBYrC4sPEdrthsWhoVguKqiaQWSxXbDbnBJ9RyfDm1TsOh5YPP/qYxb7mcDjgCkeIHu9Hrp9c8+LD54ToOT8KmsxUln4cyDHTVBVNWfPu5i2LZc0PfvB9aldxttlwd3dDDJ7D4cD9/Y71ZsNm9RfYBP6k3IF/wOO/CPyrM3D0S6XU58A/Bvw7/6C/pK3m6oNLkoZPvvd9NqvVLF01vH71cz568YRhOHLY33IcHwgeVlNBeOi4effA8dAzdJHV8hybzzndRjb1Bv10SdQirTWFI6WJnA3GgNEF+12kblaMPhM82CmSfKIoayYsExY3RVQMmNORKozovgMfSOGM/lQTa41XQn+ZFAwxURqLj4bC1SzrwP3DwNhm9KLCas9isaLvB0bvCdkQsiIpjZ37G1qBMxaVYZpkbu6KiovrK4q6YHO5YYwDu9MDy0ZsqUZPvH79LYVzTEnT7u+Z2j1THymzJeBQriFHT/ADpilwpiJHjdj4NSEGlDHErNHHCeMVi7JgGCcm54UuPMM/lJGZenq8roPo1lVGW8jTCL6nMIqhG7EonLH4riWcdjD2mCRKzJQgAjkr4SnMkfEpyG3IFA4bSsYQmKaABMbIhmmNTA9iiCSViToixUGYScKZMIFKUfIQlMEnjY0GFcHmQGEz19sti7Nn+NOO/e1P+cf+2qeUruDdqyOHm8xHH15zfb1huWlYLycKo/ng4+9R1Ft+8sXXfP32NS8+fcGVMXzv2RVaFZwOI6vlFUaV3N+cmE4lOsto2IWaw5sDq23kapl5dnbBw87hUybEyHqzplSRulSsmzVtexAKdCmK0UUj5qC7uzsur65YLqQJvT5fcjfcsL7esCgNd/7E/thyvP/LySL8l5RS/zWEJPzfyTk/AM+RMJLHx8v5Y/+Rxy/nDpydNWzPzjnbnuOKmo8++h7t8cDQHXj95h2FA+9PfPvqa5HuZsvv/eHvUZULmmqNH8GYmmnM3L3bs2y2NG6NLQqiDqic8H5AKycAT1MgYRORarVmv3/geGxJ40TyEVsU+KwxaHLXo+sKV5XkqURNA0obulgyDoqgAslEYhY02RhlzjdF6VZXZUGK0LcT0WdyVO9vDfJ9kMmFuAnnJJ4819tJhEYoRbNc8PTpsxmdbtifHiirgto1TFPH/cMNu/t3bDZLnC2IvidOkf4UKNwCZwppbAZNd+rou5EwRYx2aGWFCpSl1s4oCJkwRZLJkAT3pWfX3/vjWylZjHO2AwB5Nv2mAHPoS3c6zcg3YS8SJkxKWCWfL86koPgIZlXpl8aTwgu0RYEZPT5I63FmnaKUIUVPComklZCNlYY54zBniF4yD62TkWvMkLKwCAgTNnoMNb4bOI73NBW8eL5l1ZzR3r3i3/13f8rf+vd/yie
frPlP/Se+z7Nri/cDZTHwk5/+iDEpPv3eD6m3FSMjXvVcnq/IKrNYlMTB4Kzm6mLL/n7H/f0tz59folTg9ubnXFwWnJ09pyrO6YaJh91B4tKzkdDU6Em+h2TQusCYjMmRuipQl2csmpL7sePi6opTeySowGKzpnYVl1vHcLzhtP/unsCfdxP4XwP/srwb+JeB/zkSQvJnfvxy7sBHH17k02Hge59csX944NtXb0l+IqcJYyp+//d/QggjD7s73r17wBoxxiyaJc8/+JCn1885Oz8jRwnqWG4cdZNQKhDDhMqath1YLbaMfY9daIL30jNQmbY90LYn9CjJNWWzJGWom1qwW87JhuIcxjm0jWADPsV5XOXlFIriUQ8p4WMgEihKi7aKcZrwQa72fS8KwLIsZh1CIs4z7kdBUZYVKT8kaykKR7OoOT/fMgyS2Pzpp59w9/o1MSbevn2HDwGlzOwtMPgxcDyd2K4rtJZ0X1tYQaqfTkQv7su5qY9WmjF4SBKD/pgRaZylKEphArriF0YeIyEgItqRmDFSIqUgY89h4ng40HUd0zgIsvvxJvFL2oI86wDSXI5oJZkI75uBSkmT04kPwPs4j1AlHNX7QIiRx7NOGekrKCVuPVQCM5Og536MTEiE56fnIJSu76gXNZ/88Le4vr7m+uIZ714GDqe/x5u7e949ZLQ78p/5J3+Xp1cb7nYt/9bf/LdZbbZcPdxw8WzD9nxJaFsGs2NVLTjuXtEeBgpXU9cLQurpphPFpiT4Eb878vqN59QOGFfT9iOHY8tqs+ZMb2gWMhUqXI01DucKdvs9Dy8fqJuaPAu0pMcy8Xu/93vs9jt+8P1fp9RnXF9ccLb6Pne3J74L//nn2gRyzm8ff62U+t8A/9f5t98CH/7SU1/MH/sHPpx1kB13Nztubm7oTh1GK9rTgeViw27/DW3bslxe0Z4OkqmnQKNJ0VEUNWUpwaJlZVhtDKYYGYc9x74HDHHKxLImpQk/iebcWYWfeokGs5qgNG3XwRzWYYuSqmmwBwFhYCxZG5LWKJfx2ZOjB50RabtCaSvNtRjop5F6W+FK0ZOPk8cZmcXXdU1ZFIx+QM5DiMHP9fFj2y3JqaxgGHpiDCwWC07dkbouJaTyeJTx1+QpipLNZsM4TJSViGLGcRTF3azQKyuJ0WqPrSwUrfEhShS5Vu//X7tHyEhmsVpR1Q1FWT0W8O/bghqBN5F+ye4bIuM4cDoe6Id+lvkmxmlkHEdhDc5f5XthUZIxb4qJqCSD+tFf8ZgZWRTlzDGcCD7+h/UJ8+b5qL3QzL6FWSFqjEXnPPMmH9OPHsePirJwRK+pypLrqysUipubG25u3hJSwjZL2nDi3/v7P+LJC0GGvbl/zan3tO0bjM2slzCakW3d8NWPPpd4NlPw8cef8PNvviazog8tl89LTt0b7u9v2Ww3MFV0w8Dx9oG2G1gsN4QQOBwOrFZPaRYNKUU5qIwCEvvDPbuD/PwmPzFOnruHe3LOlEUpobpWo7Ti2eUzjDl+5/r78+YOPMs5v55/+88Cj5ODfwP4Pyql/hdIY/AHwL/3p73eMEw83J1oqiNlWXM4nNjvHpimkX/kd36X7//wd/mDP/gjQijYrF6wfzhwcXnOixfPuDg/w1gh5RelRquAj3se9gHflrSDBzSb5Rn7XcIoERdJQy4w9B1+7KhKR1dIhFUEirIgKyVhpMUDyjlh69sCXEBirCZCnMT3jiZ6MChMVvST59C3nF/WFHXFfn+i6zs2q4XUzlphnRWYBxlnDcMYyRpQCaXsfO1NhBiY/CQ4rTDNZGDNH//RH/H08gkxjpRlyWJR4VzJw8NhHkOm9yfpMA4weorCkMlyAs7z/piiPNfYudWv3nfeJx9YGSukXm0IMeKUfu8UhCSEYyWOSZ0lMWjoO9q2xWpBcY29nFg+BEIUoY8scnh/FUF+HaMoPvN7q7T8mdYSh+ZcZtQT4yj8AWPMvBGIOEfnhNFgtSLN/zcKFLLJpGCIIWKdAFSM0ZjCYaolPg28/PolbWd4++od//a/9RPuHwKDKymqgm/f3fD//Jt/D20XjKcTZxfPOTy846/9o3+N4/5bxt0B70ve/fyW/XHPi49e8B/7q3+dL7/8QkaR7ZGr62sSGVs04k/wmXLyFFUNdw+cn29YrdeEFLFObl8xJl69fsVy2bBYLGkWDW03o8y1ousG3r274YMXL1guVzzs7ji/XHHq7tE2Ycr6O9ffnzd34J9SSv2j80/nK+C/DZBz/kOl1L8G/BEST/Yv/mmTAZCx2B/9wY+5vLjkw4+eMvmGYexZb7c0yy0vPvw1vvnmwN/+9/8uddFQVhdcXrygqdfEZLi93XH/cMPl5ZLlsuBud+JunxhTSUgWZwsM0HdvWS83NPWSGCOFsaLr7w6C+46JpDTGOarFgn4YMc4Kh0BrsjZgCrSL5NwRg3jaNZqUDVMEnRSFNkwp0Q4TSRvKuiYrzakbWCxqnFbz/F/qa60U2lr6fgKtyJr3cdqSISCBm8tlg3OOpm74+c+/YOg7VqsVr14f0Nqy2Z7T9SNt2+NMyThN79/kw9Azji3Ozhl/WcJR5twIUk5oLRmCEgoi8/8wTcSUmbykL8UEzEahx0Vn5gZhjpGsxOY7zSPRqioZ4ySbzDydebwBpCQtvFl3OLcZZPyYY5q9E3PS0HyTSVlCUY21hG5AKzPzBwxqphIrMk4r6sJh5yQirXnPeAx+IkxzOag1KQV0DjR1Ba5kGG75+us77m/uOR53jJPhNEbWtkbZFT/98gYf/l2eX1/z2z/8jB8++YCL86e8ffkF3//0A959e6DUS2qX8V3ix3/4ORfbp/zopz9F6xVff7GnqGo++OBjUsrc339NinD95CnWCatxuaplWhA8bSc3p6JwwoVQeWZtllIGBbkVlWXJzbsbrq+vedjf8nAwxGgZhp7Li4///JvA/y+5A/Pz/xXgX/nTXveXH1VZ8ekn32O9XLHb3XN5tUVpsM4Sc6aqV3zw4nu4v/+l5P2ZkrbzVLVnvV1RlQ1td8v+cKQol6joGYaWQ5uxxZL1as1hf8fu4UAYB7piT86wWa3ouo6H+xs26y1t3+NjBKOp6pppDgJFa0KGKWc8iqiNJPsGA1GBKcnRkqWfhs5JeIQ+McSMLipsUTFME6P3FItKVIta46zFWrHd5hxRyr0fkaV5/KURGW5VlZydbXk43nNzc8MPfvAZx+OJm3e3GFvQ1Ave7A/krGi7nmGYKMp6ViV6hnEUJ+IszkkpCRNwjmfPSZpwWX6OGDMvwCzy6JRFCv0LjeB8+sLc0JRrdkqBlCJlIQGg3Vzry8jPoY0l5UAmSOOTua8wNwPlaxdZsexRjxuA/LuU1vPN5PHfLWPVR31EnlkNhdVYo94bmmbXETFKE9clZlCLhzCSYmBztmWxqfjq61vGcWC1Knl92zL1E4MDUxTYouTbNy1Df4fKFf/cP/vP8O5di9YrQqho+1ua5Qqs4fLqknfv7vn13/wdtosDrlji+wfGfSZuNxyGG27ub+jaHmWzgGW0Zr
dPNIsFDw87iqqirErOL86QbMZMWZWcXVxydfWUtu14+/aG1WrDm9dvOduc8/bdKz7/6Vecnz3l+fMPONv+xXQCf+mPuq74T/9T/wTLZcnf/rv/b0Z/4IuvfsLzDz7m+slzxmlisz3nH/vH/+O8ffUtYWypFzXnFxesN1uur1bEdEHb3qPNKM0s65jGA9aWAi9tD0xDS+csdzdvWTYLmtLxcH/L7uGW5UKMQyFnElBUJdv/D3N/FmPblqXnYd+cc7V77X7v6CNOe89tM7Mys6pYrCpVFVvLFmlIlgXZfDBMwy8C7AcDfrBg+MlPerEBPRkQYAM2YIi0LYmkDDZiUaJJFqvPyva2p42+2X2z+jmnH+aKc5NUXbLMKhp3AxfnnrhxIiJP7jnWHGP8//d7HrPpFD8KEZ5HbQWFtdQWaiRaeA5/ToDBf/vGLU2FEIpNVjNf54TCQ3i+iwVLc5JWTFVrB8cMA1ReUZYaa4zDaCPe7r4NoJR8CwDVRvPJJ5/Qbie0khYvPz8jzwtOHhxjjAOfSOlTFrlLEU4CDA2Iw9omS+/edNNM/KVwPbOUeEK5nbs2BL7/dhUolEcUtfCj2K31rHFvHutk3LZ2aC9hrWsLhHAKRu2YCPcaCCGkC3uxAOWX/bypm8PrbgMIdz+4FxPBvYxYuW5eKnzPb4aCzkOgQtXMFdyfubcey8Z3YBuHIbYpsAbqqiaIXOR3rZ2QqqtCdnf2mNzesd2uydYr6lyQURMkLZTsgFLcTgvm08/Z3zng4V6fYTvhd7/3iijOENJSU+K3fHwCkiTm6PAIWwf0WvuslxWtIGFZTFCBwqslfiiJQzfEzfMtNzdzVusN/cEQz++9NVxVdU25XjcsiTZxnLBabSmKiuPjE46Ojvni+XO++OwVg77kl37hf8jh/r+mVOI/sZeAxfyOIBzQbofkxZZBv00UewSBIi+2ZHnaSG9HoBPGOyPaSYw2Na9PzxCUDEcJWZ5SFBW7e2OsjRHSGUZubm8RQmN0SZ6tMXXJoN9jvVrgKXcgkBLZqAKjVoQUivV6jRd4KD8gLyvyytlY81pQatmo8WKscc41a3mL/drkhskiZdTuUCMptaWoDFlRvX3CB1GEl5aYbYmQ0r05cY7a+/EZuIisqq65u71jPp3xjW99xNnpGfPlguF4zGAw4ub6EmOaBKTC6efbSYc0LTBYlOc56a9Ub5+4Urn2xw8CVOCjrcFqQLvrehhF1NrSjhN29w9QfuDUgxZoEoOtrqHRN7jA1RJfKXwFWbF1sNAmwsxgfwof5n6K+6g3jG3CY7/MKNSNOcjzHPTVNopiISXS89BF1SQZi7ejhftWwynEXNSfuR8iQnN7EGAcx1hKkNKC1W4YWpScHD/A6Iq7yzWbJbRWBdsid3qFTQF+iB+EbPKav/Prv8n7j494dLiLEprDRyn9bhdVw916yuHeHqiK0aiFrXzKDGRVMhxEhLqP1zlC64qnjx+6lkRrFssFz1+8wtgS5UGtS6q6fiv/TbcZV5d3HB4e0el0MdrS7/Xx/YBO0ubk+BHXtxtur9f4XgddCb7q9bUoAgKoqoxax4x3+qzWll7/oRuU2ZIsW6FkTZrOGAwj8iynlUj8UOB5EWm2ZL1Z4wfO0CNVizDs0e0GjaouJwyDRlpaMRj0mE3nLgC0yBmNHCPOSk3civADv+Hqa+cgExY/8MjKgqKqAUGpFZUNnKLNb1HXGiF1kxTkoa1mm9WkuWHUj9DCQwsPGURsy5qsyOl3u4RRQBCVSJW7AZoxaEGjIpTOFK0NVV0zmUzJsi29bp/lfMlnX3xBr7vD4eEx0+mUm5sJSdKirlPKssb3A6I4YrFYOS09HmVVYZVz2N33/0I4CEsUO0qOqSs85QaLYRhSa0PS6dIfjR3q24JAYq1T8Ln1oH3LKNB16UhDunRMxcYpWVXVW4uRW4c6mXGta6qiaJyHzVtSuISpWpeuQAjVYNoadWYzSagaNLkbWroiLHBGI6GaHgeaAnLveHSdga5pOhqNNS69qBW36LRjev2IRzwk/1M1H75TMrtd8oMfP+dynlFoiTWCrHLJzEaGPD+dcHO75uhwn3BP09/vUqRrlPBQUczV7TWh9IhUSJ5lWOOsxJ5s45sT6jIjzTK2myX9Tpt2EuN5AiktVVUwXy1RKmK7yd7i5qyFXnfQ/B1Dvz9gtVxz+uaMvZ0dfu7nv80nH3/B5eUL1qv5V56/r0URAMPxyT47u33mC4tSBq0r6mrNejlDdhWjQZe6bLGz2yXNBFEUIIVLh213HpOlzhRkbUUYKupKMJnOMbpiMpnw5MkTB6OsHDJbSMF8saAsK+LYmXKqeuve9HVFWeXNm8W9U4RypJu8LvFVSFl71Nr1qdqA5wd0eh6L6QptLQrJJiup8TAyQMsALTy8OKHWBbqsGfkBnvKRno8KfKQ2iKYHd4ARC/fW2NpwdnrOaNynqgpubq9ZLVe8++43qWvNx598ShxFeHlFq9V22PUgYrtNqaqSMApZr3JKp7Zpemg3YHNtuSAIQrKycJAOC1YIirKi3e+QtDus1lsGgxoZONefEs2+/X5wp2usrlFKosuSzXrdYNElm8222VYojDSN0MgxFRz5uEaKxhzUrAaFchCToijBSrzEd4NC/VOz5uZAC2EQnmqWCY1Y6R5uiPs5EU6F6SLUnFOxKisoIfAqlBLEUcze7h7p9orNxh3q3fdHiMry8PiA//ev/xYvL+YopSjqEusrah0QxgmzTcn29S1rW3B9Y9kZ9XhwNGSzDYl9S42GoED5hv7QpzYzSgMaH4RhPr+j2C4o08zxCSonY7+4vOLq5o6dnWN63SFVlQOWbqfzZQsoFOdnFyRxi8lkwsPHD3j4cEQYGs4ufkyvO/zK0/f1KAICPF+SpluEVHTaHbSu8WTA1eUtQlsePuzR73l0uh4y8F3sdFYwn8/p9waMRuPG6lkQRz7rzZIoSthuVyxXa25ublENuff07IztJsOTaXNj9NDWo6pKkiT+0sGVJA2q24VYdHtdlquN60GtR1ZohKiRasvuzh4Kn+V8hhW1a0Rx+XVZZRCBmzxb5WN0hZUK2fjzhZSu96ZRvDV/KY0Ex03AheL2boKxGs+TbDYbDg+OCIKI169PAZeyNJnN2RmNEdJx/ZerVSPndRqAutZ4Ur2d8DfAY2hs0bV2B1WYxpxjDF4Q4och2zRnIJX7Xm8dD27H73byFVo7qW5Vlq4wa40uK7ZbVwSEkI0VuckU1AZj9FvvgeMEaKfme8tWcF6MIKjxPP/+uzY4Mwlo7nkC3BsQGy8CzeAQKfE831GdEW+VksbkLkuyLpvo8AKsYLPeMptO6SY9hCg4Pt4hij6i0Ibf+v1P+fTVJdrWYBWL2RLdHSNFBNrni88KZjdrjg5jqrRGVIL9cYI0KXIQ0m1Bupmxur5Gxz1u1in9TkxdeggRUeQG5Vl0DYvFhhevr8iKksPD9wijDq1Wn+PjY6aTKUVeM5+twFrWqxXBvu+KQ7PKPTgY8erla9L0q8PBvxZF4H5ffXl5Ta1LwkARh05nX
WQZqmuosiW2WlMUhtqWbDc5V5dT1sucw/1jpFCMRyPyoiAvUsJAMRzvkOYpQRCRFxW+Z1ivViznK4IgxliQyr0x0iynqkqiKODmZs5kIpBiB10XbufsKY5PjonjNh9/8jnKiwhDS61TVusFJyd7mLpE2y1xyycMJZ5K3HAMiZY+WvhURlBWNbbUDl7iRyA9hPQQykNXGiNEk7brLNCh70Qyi/kSrGU46qO1RQrFfL5guVrT7fa5u7tzQaYNqkwbQ5qmBEFAlqUUZXF/elANqKOua6SnqI0mLwvXIiiFrJ12/75lcIXPGZ+EdIxC2yDE3FBPUzcyYaxuVoTgex6rxZy6GdJB4/7DHU6tteMtNusJ3fgFdKMhcHBSR+PN8owoBBc4It8WT3EPJRHSzQyaxaMrOO4WID0P6YWuXlmBEkHT0rhtQVUV5HnGYj7n7i5xw1SjyfOU+bJiZyekJuPx412CKCArVlSiJu62eXV6wWx6gZAhQdgl8U9IlyGv1huuXr3ki59MOdzt0usqfu67z6jGIaenS8pqw2C/zc1djjQRoUroJAm62jTR9oIis0gRI6WP57c4Pb1hvVpxcvKEo+NHhL5iu01ZLRZUZUWeZvR6PdqtDuvtmizNmVyvieP6K8/f16II5HnBzfUd1hpacRvfUwTKZ7acM+oNCT3J7O6Cqq4gKDG+YrNdkxeZiwOXcH5xznR2x2o5o6pyHj46wQ88VqstSbvHk6ePXWDDZsv+4SECD125vrCT9NDWcnv6hijyyfMtwiYUWUpdFpRlwWDYZzpZkrQTgiBkMStpdxL8IGSxKBiOYkytue1JlBDs7Y3odmKENFzd3bJebpGmZNcMQXrU1qXgtJXX6OdBej7aFo1X3gFFjHUqxKoyZFkGuIKU5VvM6TkH1oVrSCnZrLccHBxwdXVD4Htv9QBCuFTlWmtnTGpswdgv9/V5XhBEEe12m6IsMaZCC+3YDdaS5aUbCr59/jfrwQb5pXVNWRYOJ9b4KILACbOyLMNTihqDsdrBPKE5gLVjA2oXjuJWgc5KqK1BWDfQLKuKvCiR0nNGIOseHKLBkTlu4b168cu5gJTKJRwpD1TgOJHaoK3AF196OI11EvMsTbm6vGa8p4iiCF9KZAiZWbOtNuTlmk5H8Ys//4TesEuQRPz+Dz3+4IdnrLcleblitbqmihKisEWGx3pecHE2o9+PuLrbsrsXI72MIATRjhgMRmBqpFdT5imX51OMzpnMlmxWNUcHDyk0bNYVlxd3LFcrfvSjT/nud75NVlUMhzvk24zhcIhSHvP5nNvbO0ptydKCq7Mp77zz7leev69FEdBakxcVB3sHdDsd3rx6zW16RysO6LRb3FyfARVh5FPJilxItuvCeeuVY78b37BazinrHERNWaUsVwWbbcZgOGQwHAHgeQFFXnJ1cY2nYrabHKM9olYMuGjxMPTpdNoO0y0cYXg0HPLyxRvqCo5PTnj+5mPixGd32OHgIOLps0OEtQS+yw5wvn/J7e0NF1fXZGlFp+WzXG+JlJO3rtOcbtdQVDV5WTrACPeadtO0tAKtrYsWt4K60lxf3+L7Et8PuL6+YbFYYK0lCmNW6zVXV1ccHR660aJSFEVGUeS4K7ij7XiNv8DSKAa1k9eGQeTwZEK/XclpbdDGUYH1vST3/vre7N6rsmiu/zVKuEJltGa7StF17W4Upn67KjTNgE5rTdmEszrIihsa3k/7jbHNUFM0hcaJfMCFr9xzGrUxVLgtgbAW1QwWlXLaBCEVVkgMjjvgvpeHVAqUaWqbG1IulkuiVgDCoJSPihR3mztqCZ1hxPR6xoOjLqNxm7zO+OjZkFYMt9OK09M1dVawmM0oshZSdshtQF4lpFXFm+tr4o5k/7hPknjcLl5wvHtAIEuOdtsIs+X6ckk78UFHWFMShwMeHByT5gWt1oqDg2Nub6b89m//Lu+/+w6z6QStHWF6vV4iBLx+/YrDowM26ynGZETh1zx3wKGm3Bs/S0vmsxWmLqnynNfzG9LNnKOjMbq2UCkKIyjLEhlGhEmI73uoQFJXEaGBLF+z3S7ZpjVJu4OuDZeX1wTNE2GznXN5fc1773xEkWs22y3S8xgNB5wcHTnve5GDdbvyqirBGj766ANWyy1lLdnfn+AFljj2effdEzrdkDJL2Tvo04o6LBcrEJZWEtIb9PD8GklFVpRYpTGNoUY3/MKyqomioPEeuCHbPXm3KCrKqsaTbjW2TbcMwh6eH3B+fo7neeR5gRwozs/OATfwcrGAgvlySVXVTvyjHZDTVz5+4L09jGEQIVCkaUatmygx5YqEMS7xyfMDpybk/gnqZMNYN8HO09R9z8AnikJWi5TpdAqCLxOUmmGg/albSF3XWG0QQXMDaNKF7gEjSkqkFE2eX+HmMNJ7O+OotTMU1VK5VgrjNgPy3kzl5hBCSTwrMULhcd8OuJxKXZVYUWBlRWQVeV7QbnvErYib6QWb8o7Ia/H+o2+ijObm4oz5dMb17TkVklYgeXIycnFnkeWLz87Ic5+bm5Kz8ym6qlnnUAuDqmBebp3PxZvyRXTKoONzOI7ptAyhJ0jiiLgVc9zepTvYod8/QG6W7Owe8PTxI8oy5+L89G0gahwHRGFMp9NmvVkzHPYYjAWffv6KbreHtl/zmYCpNdlmxVm6djMjWSFCQ17XXEyuAdB3a4QU7B5G1MJdNzflnGG/i+/VCARxLLDGR9cCU1fUpSZJ2iwXKen2jH6/Q1Fsmc8mtGLFoB/jS8XN9ZzNcoUf1rRCifZ8FospgefTG+8Q+pYsnfP+ex+SpQW///0f8nO/8ISr63N6owAjS9JyS1mmtDoBStQYu6Hf6RL6HVA+i3XJejFH+AItHNdvVdaUyqOSilKot3FYZZpjaAI6PA9tBbVQVKZCaesizbRhk2asyw2RiMjKjHadkKYp3aTjDDbaUNcVWVoihDvwVV03rQZuDmGcGsEPYmoDZVaCEngYlOcGgLq2xGFM4Pl40jp3pqlAlwhj3UAw2yDqAi8Mmog0y3K9oag0vhdgbe4GdNagq5y6zqlqBzqttW3ChQTSCIdfb1DsQlokDgduhXVFs66wwrkutZWNH8FSy8q1B4rGeGVASfd7YVFS0IoCAm0xWmBN4R4+mRN9CZ1h5QZbQV0qPL8DniNOx2EbXdYuzyZUVLjAnCAc0ks6qEXBNrMc7o/54KOAb304pqoCPv/8mn/wD36PrFiw3NZMlilVrdBGUeYBufWYW0Ucenz+whKFHseH+3S7gjiJ2TvYw3RG6E1M5AkenEQYKxmOhlh8skKiZYuwPaC/t09epqymG1Lr8/Lylje3E94bjBBJ6yvP39eiCFhryTdLFusprXaIlTWr7Yajk4eodkRVK+5SQTvpUtoIYVM8pQhCjygUCFG4SruaoISkFQZIIZmXK1b1mqp0MtZ8myJVhacM7zw5xFMlcSiJg4CiyGm3FEpWWFOTbResK0MrDtjbHbBerynLOZPJDXU94cHDY5LugJ29HV68/IIwPiBoheyNh5RpSroReCoHv2b/oI3X
zgmikjIvMZWFOCSXgtRYlmVFIcBTAoRPUWSNN96SxC6FuRYSIXxyI8AqVmlJrjU29JBhAEXJauOcgVpbiqxonpQ11jo1YFGlVNqRhu7VjaYBAllcKq8VTh/gCdOAUD2E8InDFr5SCGowBVYXSF0htMGWBTbfupjyBp662abkRU2728PWJfl2g68UpqowOqMqUsoyIysKitqtRsvS4CuLMAJlBMY2NmBr8aSEhsZUmoqirlxhQzXDVgPSoDwBzabF6YFcIKqwCk9ZfAnSQGELamGxtcDYEKl8lNKIeovOLVXRIU0N+Jb9vQdglsymd2ht6I9H+GGLF89PwYvY239Cv685Oz1n3BsyvfkxRVHR7Qx5dCz4xnsB3f4hN5MNL95oXp9NsUKhK0laOKn4OgvwvTZ66XOzmSPVljCO6HTXJJ1T9vfHHA89hi0HXe30uyxWCxDQSjpsiohZ6vH6zZQwGrGultQFVPKA11eWoP2vByryJ/ZqtZxZYrtN8UOPq9srkJIHDwM8FRIGLSBgZ2efOIKyKCnzjFYvoSot68WCoijYLNfUdUU7adHtdJFKuH7TuBXfcJgQxwFVtcGimczvKFKBVBF+5CNCgfElZ+dXzNcr2u0On7z4gqOjI+JOh1Jr8rpkd38P6QsePzoi6bRZLnr0ewnb7Zo0XdFvtzk63qfYbplOpuD7YEqydI3RhkGnhy+tOxzFhijykIM2Rhu8QDk2gbGYhu+flSVSSMIgJIxj8s0aYy1pWtPZH9NuJWRrQ55XhF5AXmjKckNd1y6gVEq0KSlqp1SsrEZjqYxu1mdNnHrtnpwGi5YGhRu8dbodByRp4KjmPl1ZCkcMrnUjxHErRdHIf9vtBF8JtssFSgm80Kc0pbMfawdXdWae8u36Vtgv03OttRi+RJEjJcI46Wxd66aPVxjj2inlKxQNn7ARJBljqHGkpvtWo6q1SysyNSBRDXAFaGjPAt8P3soMup0em9UGXVs+/fhT9vcOGQ33iKOIu5sLjDbs7e5ydnqGlII3L6+5urrmF37hlzElpJuUKMp4+uQB3f6QVucNKmihgogsLZhNVkwmK7TeUuSaRTrH4rYyl8KAqAmjgEEQ0o3clT+IAoQUeIFHnMTErZjeoMt8MafT7bikJAXZssN2rpnfvvzK8/e1KAIWgxWGVtJiudoQhgkWSZEZBwLJ3ZR8dzwGUTDNNyStHkmrR7otubuZEgQBWguq0rKotmRZjVWSOGlR5BVeEHPy8IiyXvP5Z6/Yph5S+AjbYtDtIT2frVjz4+efs1ltyYuc3BomtxO0kjx99IRtmRN1EshzlGfpJAFpuuDZOw8ZDHucn9Vs10ukrVBYtKmodckmnZFEHdotn7qsOTnZ4/TVK7ZVTl6sePz0iGyb8+LlC9r9IXK6osgd0rzSmtoIBxOVIVGrx2q5JVCKotIcJHtuVSdbCKUxCNKixFlyK3wcsaisCypd4UlBIAQi8LBKOiu0VBRVgdVO72+wLoHIc6Gr3U7XtQXaSXhpkD/WWHRVUWuDUB5CWYRUDTAlpJ20EFZTec7RJ42AKsPWNWXmfpWN3FgK0fgQau4zDYwxzbrvXhHcqP6EQ4rVunYafeOyKoVtAlHu/zxOwqCEamzSoiEcO12BVAKBwvNUk+34ZUakQBL4Eem24OY6pdsO8KTPi5evOD+74Vf/jV/j5MEDsrQkabd471vf5LPPPsP3JKP+MWWqiPwO5XaDEh5np68ZjsaMhhEPqyHDvT1anQ51rpnfLbm7XeD5CVUlmU5XrDcp682ass4pypI0X7AtehSrkusrFzAilUT6TYCrcgXB832M0RRFRaBit/6sqy+d2n/I62tRBPI8Iyu3dPtdFqdrHjx8RK8/wmiBDjy2qxlSSbbrFcaW5HnFzniH3d09Jnd3LJZbOokgDCKkEqTbDbPZCq+leDx4jMEQ+hGamtvJDet0TaU9xsM9TC0o6gxra2Y6Zb7d0u4kFGXG5PaGLM1orVc8EAZtNdeTO/Z2dlA2I/Jhudywu9tHVznWVlhbs1zNyTZb8q27nlsNgbR88O5TrDF0O12qfIc8b2NNjRcKWiqg3YsZD/aZrzNmL8+orSASsM1y2kkHjaTVGVBf3oKWlManlYxZr5YYG1GZAitAeJHb2QvT9Ns1ZeVaosD3CIVC+IFDrltDKL3mqSgIVeiGfhKCMKIVt2l3Onieu1FUZYHnuwyHqtaYe92+F+BZiecHhJEb1ko0Vb51wNIkdi2AhLrIyTebJrHZxcd7nkQpJwy6jynXpm5aFfFWmVm/lQy7HEcrNLW2jd+ARifgoEzSgrUSqXyk524C92nQvu9jPQewRXhvAadOsyLJ0gyjIK9L0k1FLxmRtHqcHD/i9nbCzfUt77//Ee+9+x7DwYiiSAl8ie8JHhy/RzveYXq7YrGY88F7HzKZz6irFKwgDEvaSU0cF+BBP+yy2w+J4i7tZMh6nbFNHT0qzbak+ZqizAnUPlUZcHNz40JjspSiKthmW7bbNVUJYZgABikqilQ3hfRLy/gf9vpaFAHlKcKWIk0Lh4aSPt/6xnf48Y8+RlISKI8wDlgv524g1IAl20kHJRQX55eURel4bsbieSFClBRVgbbu2qit5fzigizfOpWeJ0m6bW6vlpxfTKmQqJ2E4e4uy8WCvNa0BwP6wxFPnjwhiGPubm9J2h2kJzFZga5y+p0EXebcTiZcXV4QRxEY21iU51itSVp9VrMprSggSRyyejDoIkSPxWLBJl2xs7PDwyfHdJJdok6PEsP1zRQ/9vFCj02WNvFoEXG7S5EVhEno4trnS9apSxGurcb3FaXFhWxql8JUaWeasVJQo6hw0t5aG+IgwGJQShKEYePnL7BCEEQhXuA34A5Nmqa0o7CJCL/39gcI7aOEeosgw9MIE5LVGSIK8EzAqtw4dl7TMkggUArjeQgpUKpZjzYsAW00xjTXeqCsNGWlqY1FWyi1droDKxBKglTOdGRoskdcbiJNMpFFIjyJ3wTU1JRulWlcUXHSYqiriiwtSMsSLd1Qcnq3Yrtd0056PH0yZLlYsl5tsNaSZSk/+Yf/NUIYqjqn3FToSrKcpxgjODrcx/MltS2pbMWoH9JuWaJYY4ShncSMuhFpWhLIDcOOot+OEbLFfGFod0eMRgNq26cofVarFd1el812Q1mVvHr9krOzU3b39xmPRwShz8sXr7l4fUmeZYx3xoDl4m9P/tDz90eBivxfgL8M3Fprv9F87K8D7zWf0gcW1tpvN1TiT4DPmv/2W9ba/+BfXgYMldlisLSSiOl0ymq1ZrPeoLWmnUSEoYcVmuV6xWhnzHg0cuKUuubo6Iib6xuyLEVKSX84wAt8VvmMsqrxvZCqNlxcXVIUK6p6Q7ebUFuaLLiMSii6oosKQk7PL2i3Wox2dtiu1nh+wOXVFXc3N/yZX/01Pv/kY7LZDZ4U9Pp9ri+vmS3mZFmOEh7pNiXPSjw/Ii22VGWNMJp0tUEJSVG4Ch9FLtBDSsHxwxMmkwmIkIOHD7AeTKYrtmnJg8eP+YPv/Yh0m1FjGO/
vcXd7x87uLnEnojQFq3TJznDIarVBlK7PN9ZSGu0Ohgaaw9S2itIoJ3u2AhnEGF0hlcAPInTDRyzqurl6a7TRpNsNUeSwYxLHJPSUj/FqhA5Q1jR5DwqMwfckxlMIT6IFWF2hqwJpIfR9N7REYAPfmX+wWElj9HLaBG1w8xErnJ4iL9EIKmPefo5F4HkBSNncGizaSDyaYaL0AOVMU1I4CAwOqqKtW4feQ02MdnL0VhDihS7kJssqLi5XpJsV3W7JRx9+iBIBr169oSpLdsZj6qpkb38PYwyFrrBW0W73iGOfKIwR1uIrSRzHRO0utdAEypDVGeu0II7aWF0jjGTQH6ONZbNZURcL4iCi05LM0zWVsSRdSdLR9IdddnZ3ePCox2RyQrfXZTqb4Xkevc4j9oZwe3fLYBARxxF/42//4afvXyl3wFr7P/qpIvF/AJY/9fkvrLXf/iN83S9LgNVokxK3OqxXGcvlnO9//3t40kfXJXHkIUSNH3jM6owo8olij8n0hjTNCIMQLxBkRYHQwjH4Wi1MBudnVygVoLX7S50vbjA2RxvBZLridrpgvS2pkGQ3d28HQM+ePiNdb7i5uqEdRiync44ODlAIOkmb9BZmdwuWiy3TxZy8qkjTElvnlEWNFBE7oy5ZK8PkNVVREgUxdVFzdXlDWZcEQaOKHI1p9/pM5guW6zmL7RoZ1PzZv/jLfPH5a7qdEd1elxfPX7F3uEMnafP8ech7773LOp0SxBYtMkSoyaoVdd1g07WlrG3jyzeOrGMFtQ2ocWhyJQRCRVjjpungY7QhStrYyjQBHq4332zWtHd37728Tt8hnMlJ+iECgxKu13bJQBVKWGgOv9UVReago0pIKtNAU4TE4th/96Yi07AUaiOaQFJLUdVkZY0VgqpJHbpPkBbiS8uw4wyCFQohfaQKXEK1kM3coYkva/IC79sfRyVyAzlrLL4KMNRM7u6wZounBGFkiaIOngrJi0vKKuP1m1PeffaMvCiI4xikZDQekiQRUhpaLZ+dnR2m8xvGgyH9nR4vT1+SliVWS9aLFVlQ4vsx1qR0uzW+7xG3QkbDPsJqri7OuFtpVqmh2+vy+vULojjmPf0ug8GAk5MjttstL18857PPPufZOw85Pu4zGjlF7cmD4688f3+s3AHhGo1/H/hz/78c+n/+paSglQS0kzZWK9qxReuCXjshLzRa5xhbkrR77Oz2CULBdOb6dZezZymrDG3c3nk+X7Af7KOUz2q9pcgzBIp33/uA5WrMD374u+S5ZbOu+OTjNyRJFy0k3ZbHMq/oJV32xnv8wetzVvMlwRMfXVa0/JAXn31BN0kI/Dabder6tarkbjZDKEWSjBBCsllv8GRNtikxRYWvfOpKMF8uuDi/Yfdgj8Fol8U6RfkR26xE+SHr9IpOt4sXGobjNuUna44efMDh0SF7B2MEikcPH7B7MOL4+JB//Jt/hw++9YC8mpKlGWEiqNYVaWHISmd0kjL8cgdvDVmhnWHKKMq6otACaxVJ3KasQcqQIPbA185XULtcvcOdXUxjRnL5CC4yzAANDYXAb/T4dYGuCqoiQ+uSIk9ZLRdMJ3dM76aNJ0BSlrWbLVhLbe3bpztN36+toNKWqtYUlaFobgl1Mx+4N1zVxqAapaE1rhC5VsBzMeo4BJm1Fl1pN1S8J/kIdxu4x6g7paJLgfI8pzvZbkukMIzGB9S1ZLFKiaMOrbjNb/zGP+bZO++y3rjUKXTNcOeYm+uc2WTKcPSQYBtijORg/4RNvkHYEKs1223Gzv4DkqTjYu3zilIbsiJDmxqhfKbTBZPpHZMl4LXpdXewuuD89JbryxlPnzzhvfffp91uo2hxd7Xk0bFAtgMeHu9TlAWh969PJ/ArwI219ouf+thjIcQfACvgf2et/cf/si/iBz4PH5xgTcCof4DVPtPbJaEfMBgmnJ+/wvMsiIper4U2BXd3M2qtiaLYhYqGHm0vZrVaEychta5oxW2S1pDLyztAMejt8c1vfovT01M8FbFYZixXW8pK8cGHH/HwvWNm0ymb1Yrnnz5nPV8x6g6osxJT1Nxd31IWBU8ePWKzrkgzzfXtlKKuWW3cXreXaqrCsl6XLGZXhJ5PKBXJIEGIkLwwtJI+R4ePePLsKYPRHvPlnMl0SRwn9AYd3nv/HcIgJggsUtZEISStDj/zM+9xc33HeK/L/uEIY2uSvuDJ42PaHcmrl6/pD1q8fHFGUQoCE5AVkOUuysxN+hM63QEqiED6ZGXGNiuxumQ82kGbmrjVot0NKbKMsqooF3P2Oz2qsqAs8sbi7Aw60pNIVbpeH4vyFOgKW9eNYCunyFIWizmTyYTNdguq4f836sWqrpsDz9uhoHNQflkEikpT1oaqNlTaoc7uY9IQ7sktagdwVVKhhAThWgGLwjRRbuDQb2V9n23QmKmEQal7iIttNh81nvSII7dym6/mhBd3RNHnTO5uePDghNGgx/7hCX7UYjQOmM0m3Fyfo0JFVeSo0Od2OuPlyzds0g2rVcnV7YzA79GKA67On3NytMN4Z5fbuwm3kwusyOn1euTphul8Q5YW+GGH4bCN8rt4ssVw0EJXiqvrK87e3BL7PZ6+85R+e5dvf+PnGfV22MzuiKTACxJef/GHzwPgj18E/grwn/7U76+AB9baqRDiZ4G/IYT4yFq7+uf/4E+Hj+zuJHRaXRcjZXykCGmdJO5K5nukWZ80XWOxaFtzN5lydXVNEIScnDxwQxftWICtdgvpKcqyYtg7pJOMWc5L1qstn3/2EmM0g/4e7W7Ex59+iucFtOIOO+M9OlFC76jN717+Dl9MPyOQil67TeSFHOztc3t9TbedcHdzx/XNgrwomUw3oBR+0AEUd7dr+r0e/d4us8mUbmdIttlyc7cgiNv0+jsMx3s8fPIMP4zo9UdM5gtm8yV92yeOY4JQMhx0MbXk0YMjtus56WbD/t4hus5JEkUUBbw5u6I/bBEnisfvHLLN5ihpmc2nCBEhRcJyXXNzu6TIa+IoYHd3l17fIa2tAOl7VNYQBgFWSvK8Igg03X6fhdak2xyrXTzYarmgvbOLNRqpvAZI4jYCtW4AI8agq4K6KijylCzbslwumM2mpFmKsRYvCJ0foaowzdVeN4nHUimHHdc12kClJWVtmiLghoKujaDp58VbTHtR1Sgp8ANJgMQKhUVhhcLwZe6ANs4RKRxO0hUSbZBSo6RCYBtPgjNIxa0EZMA2rfjixSnz5Zr1asl0vmJ3d8Sg3+X16RVxHHJ5ecf55Ski9Kjzkp3RmM+ev+Ds9WtGozGLRcHN7Yb5fM6jJ0+I4hGbTCBmGdvM4IcdvKDNzu4Rq+WcvNDEcZeqLhmEfVqdIZO7CUEQcnCwQ9IK0LXBU4LtesXezoijg30uzy+YbizP5+f4vs9m8986gn/8IiCE8IB/F/jZ+4818WNF8++/L4R4AbyLSyn6Z14/HT7y5GHflpkz92w2GXHsNcGMW+bzmZvmKw8vCEFYfF8Rx1ETkOlRa8P1zTVaW8bjXWpjsFKwu7vHel7gCU
4a+m6DFJrxZITpLaY3sc80lEFCx2uKg6qpML2JBLlfsD4bQSBEHnuqHXmWcb3dxJmwUHTWRWUhnTIaj5lN8ug3EAyjMkf1Pe16E/HxUmEGSKyxFkRE00WmYUqaZpg+puMvG5FxEhDLAobpQZT4Ukwm0xicgqXvLdZEKXOpYg2dpiku+JtpgrHxJM6yDGssTVdFI5NBMKTZVYyKMgaQuiZLs+H9EzUPNhvqpkaJEZPJBK012+2W6+vrIU2NUOPpdErTNOx2W7JMx1m9eQmMGsatUqCUHv4d1Y2EjH0JHwJayGGs6RCCgWeQ3vRSlNAoAVmes7e/x9XVJYvFgpPTk8HazeGcRaeRaCSDR4tAphVJniGcRUgbxT3wIBwEi2m2bK8vCc6S6TnkGqEVbV2zuLzi/OwS2xmKrIhBW0mMdcPJHzAmxGlAEj836+LpPQwcBtyFwgvFy0AGEufA2RADCvHaJEIgkFjropy3jH8fm4UBj4yqPc6igVRKMqURoaetG/q2xzhPkqWkOiHYOH0p8oK+Mzz45AGPXzxnPBkxHk0wfc9uu+X64gLhQcuU6WzOp48e8umjaFvnBzNV5wPOGWbTebRLW16zXW/Z7Xpa45iMSvK0oG16un4DBHobvTjyLIkBs7Xxv85j+1/idEAI8RDYAg6wIYRvCyH2gf8QeJWoLvTf/2cpDscKlnjji6gYmyYp5XjEcr0GYZFpzmQ6YTxKaesV3sSJgE4UOpEUKo/yUn0fTwAJ0meMRiVSSvK8QAhJr0ycAqTxRuj7Pjb5spREJWidUBQl3nmuF9fx1JAqAm8GTwLpY/BIk5Sm76ODbvA3/QOpNMrH5t+oHHF0dBwputbdlDxSRI+C3jpEEkU2d3WDaVt2ux3T6ZSiKG5MUV5CiLuuuznRje2RqRz0GGJ67kKk7uZFSaITjHH4EANDkqYkSRoDoFJonbyUQCD2HAVFWcb6uI0a9l3bcn5+zna94s/9+q8BIebJzg5gJ4UzLvoJiECZaWSqwHSIZGja4cB2OGdod2u6aoeSgLcwTHGqXcVus8V0BmfB6Ti9QIL2HryJEwGGaZLzUV7Lxoax1oFEiajtKFS0mI8pJj685JVIXprTKqlIlCRRCufiOE7KyJkQAB6CcHgb8NaQpYoiTZCAMxFnoqWmNS3WGIoiJ89y+rYmS1KcdXRdz/7ePLpWLZfszWfMpzPOdxX7szlKZUgpePD4IUIGhITeGJLBBVolBYf7RwQb6JoOJfWA7oz/DSL2fWIjO8QDRAmyXMcSCUmapZFJ+Iv5Q39qmcBvhhCu/sT3fxP4RyGE/6UQ4m8O3//Pf9EfJ0nC4eE+r7/+KrvVNa6vGY0K9g/20UnMBlA61j1DI3A2ncVZqo9KNV4IhNKxLveeVCT0IRqEJmmKUpq2bYcUMcRZ6zCnf+ljOJ/NB1BOPCEvLi4Yj8dYY6iqKjrB1k1k2iUaY1z0IUhSgnMooaPteR9xBaPRmKPDI/YPDjg7O0Moya6qsF0fBS5edi5FnD075wgI6jrOlXe7CqXkDa4BoGlqkiR+bFEpqBvSYzVAiyMMN80iLXez3t7oNMZsKKFt26HUCAOrTsSGU1lw995dltdLLl5cxN7H/4+5P4nRbd3T/KDf2632ayNi96e9bd7KzKrCxkYIBMYWCBDCYmLBgF7AhBkDmgEDPEEIhBggBEgIMwDJIFnYCBDYwkKis8slV1GuvHkz895zT7ebaL9mtW/H4L8i7q10nnSZrJTOkrb23nH2jrPji2+96988z++ZJ/7oF7/gB599Slk4cpJSPAb/VHWk5CHMKBJ1VaCtIoYJ7cBphUqRNHUMc6A7ydpXaU2YZiYyvjsz9iMZjbGPJh+F9wlXGgmUCYoYRtEIsJjOlsNNaYs2Vg7CzDJzQQayefkLi5ZD9B6/pTbVojT1fl7OBwmR0Uv+gcR6zeiqxhrNOPQcj2LLLauCcVm5+vkxX9EzDRO2cPg5sNquGOaR92/f8erFc64uL7l++57f+93flbZOG66urri/OzCOj+EhDq0069WK/X7P+/fXsjoceuZZKFMoQ0YtlabkEIxDTwppaSUWNsdCbRJc+Z99/WW1A/8k8I8tv/5ngH+ZP+cQKArDjz9/zqcf7XmrzmxXr0BF6trhbI2rSmKGu/sHUAG7Knj9as84jDw8HCmNI2b5pjulGH2SmzSt8aPIWYd5ZJj7RSWXiJTkpJlm0Vrv92vW6x13t7dM04w1Gh8jry4vuP5wQ0ZTVDVdN2CTptCZjJaSXmmUtehKJL7TMNM2DevVCmsc53NHPwyEFBhnzzxNVGUpPalOGKU4d4JMr1xNDqCU5XA8UxaO1Vo0AjEFQvTMQXp/lCLMSoamWrFer5Ypd6AsC4xWaLVMjmOisBprNWVZMM+BGCJVWZGAsqhpqoamqDgqRVKZ0Uus1/F0T9v8mO5wR/Iv0FkxDT2uqRB5TiCpQMaw2mxRaiIpJ5WZrglpAh/ox0Q/W0K0dFNg+nAmeM/D3R3fvuvoY4MqtigEd5aTxxhwVmThOWpQiYQMja2xWPsYVGtARXzS6JB4pFWhjBwOLJLynNExYuwCI13aFFk1ZHKIKGckAFdBIuEJJBvwauT21HF3uiNQgIWkMj5F8jQRw0wKgbKWVKeH48Ann79h8jV/+IcfeLi/59nlBcYathd7fuenP+YXX/0xaU7kAFVREWKmaetlNgW39/ccTg/000g/zEyDX1o88XugoF21oh0IXlbTy2Ys5UjX9+I+tH+5M4EM/F+UUhn4ny4o8Re/RRx+h+QV/j3Xb+cObNYVLy8VpTnw4pmladbM80RMHh/OrJpE3dQ8v1jT+RmfEus1XO5WXKyNRC11A8dzRzd4eq9xpWXWKx7CiFKJKXZUDXTTWZ4ABRhVsFptOB0GmnqJsk5JfOxGKgZlRHxiyoKmXtGPEeNKAYK4EmsL2SYohdWGQgvlaImM5ObmlvHdgE+BlCNaKXSp8DoQkpSgWllQkcJBWzfEKbPebCirinkWqtJuv+Hh4Ratk2jIyXR9h1EXGDR+nCl2FaPvCfNM6QzTeAZmCid9c84ZV2Q2puF06lm1K6qy4XQ6MZ570jzzR6cj3k8knclG0nyMAZUG+sMNOfQS/3Y8sCufQU4olUkWsq1ptjugJxUFQa0pzI6gR7LO5KJGhxXD6Wtu7u85nh64vrnj3dsPHO/vmc8zZa6odAF6IKYZnRcxstbowgIzMUdIkiblnEMvk++E2GZJAUMQHYISt2DMkdnPEAI2egoKtNWgPIqIVRmVFSomtJV2IWsICrwJ+MLT64EH3zPknpEErsBUDj8OWFdCUhjn8HnG1Y5tXfPJJ284Hg/8yR8pvvjVLyF5sk78nV/8XfYv9lx/uEFFzT/0e3+dfhwIKXFzd4eyhruHew7DkYT8O2JGqMsmL9J2udGNU7K9ygqdNCZajFUM+Ux0IjYz7V9uJfDvzTl/o5R6DvxflVI//+3/mHPOywHBn/r4U+7AJ28ucl1b9hcbPl69FEiDXjPNI8fTHcbCalWx3m64vn9A24L9ZkuhHfUbi
YLu+5Gb21uOXU820i+9/+Y9thgoGstH1ZpkIsc+YJ1jt7tkVW/YtBd88+Vb7q47umPH0N9jrAxlZOAUeP36ghAGnM0Ym7i8XDGMR2KcF6GSyDyrssAYT1kBzHTnmQxYK6q0oigY+p7NZg1IsszFxU62IFHaom1T0p/uKFxgt11xf++ZppGhO9HWtXju0cQ5EUNC2562KZgMhHBGMWPN4+rSYbSnLN1TiZkjMlwzFc+u9pRlDXmQHtJIXLgxitoVBB8Zp5mqbRmnAVcY5nEgo2X19MhvULKitFbAlvI1O+acMdpSFhUxRZpyxRR7TqcT79695du3H/jm7Xs+vL9h6jpUmGksXK0bGqdF3r9ImmWVq8l5sY8vmwpJma5wrpB5jIcUw7L686SsUdpJib846XKSzYWIr9RvTGgLJzIs7YqyFlJEK4Mra2xRUtXQrhRzn1ivNlxctfz8538oA0lkSDr5yGa7fUq4evf+PZvtinEcePvuHdY5rq9v+Bf/xX8RYy2f/eBzfvrT36GqKzb7Hf/s//Z/x5/8+lcydF7eyxlwVtq5sMxirHX4JCvS8/nEOI04bVj6JLz3NE3N3d2Ji4tL/tLwYjnnb5afPyil/jngHwXeP+YPKKVeAR/+vM/hCstf/4f+CrvdjtlPbPuGzXbNhw/vCLFntW65uLgQSKbKuLJFA5fbPRrN6XDm9ZsXXF5dMHqPz4lz17HbRFJu2OxXrPYth/4Bbd/QrlYURU2YMnW55uULxfkA776yzPM9SsH9vXACnz9bs9nsmMYj3Xnk+bMVb95c8O7dif3FczabLe/ff2AcJ9br9ilq2y/46KauKNuakD1t23A6abyfWK1b6qri00+ecXi4Z5zGBXYZKMsJ72+Z55a2UazaFd4HVquWnCJGL16BnCHd42xDXWn8fId1hot1xel0QKmZ16+uhCyTIlVRkpJi6CesUcCZnDxF4cXK21Tk7BjHgckY+i6jG8dm2+DDyO3dDQ8PL9hsd5RlJU8/ZcnZExfZ8/l4JE0zdlVQmpKQFit30xC94/bmlp///Od8+dXXXN/cc/dwpO9HwjShoif5RF1oCluhEOUbLLZvvUzrs6g3fQqLK3Lp9+2iW8hS+aQ4k5Jk+aUgWgOjJMzT2MVluoiFUOpRYsSSnYZKCZUyRVXi6pbRZ7phQhlH3ViyUuz3Oy72e06HB9F2pIitSlwhLcGXf/RHEo7jNMY5DqcTP/nJj6mqii+//DWffPIJN9c3/OLnf8hnP/icoqw4PBw5n860qxaNJkRpbawWSlb20hIYY0k2Ef28zLvk6/fRk9BMY6CpJeW7qdvvvP/+oglELaCXQNIW+A8B/x3gnwf+s8B/d/n5f//nfZ6ydFw921I3FUOfePnyAqUVqJmqFmJsUze0qxXNekXXzzzcPbBaN0LMLTX7/UaMRTlz7nuGcUXxeUvdgLaAy4x+z2q7IgN9N/H+7S3n4y2F1fzuX/khn77eUNeZh4d7/ubfvOWHP/yIH//4Dc6VrFa/zzh4yrLm5uaGfjD8I//Iz3j+/AVffPEFXTdQliXv373nw4cb+n5gGEbqGnb7mn5MkAc2a8vtzYHL/RWr1Z7nzzcY1ZGSIaWAn05stpkQBvr+RFk0XFw8p20uyNlIT6sMQ9dTNzXby8Q4jFxdXYCSHr1tay4vLW3b8OzZFYfDAx8+vCdnjzEWrUeUGhnHAaUaynKmqmtWK7ERhzBQb9ZY7TEriyYSwsiXv/6Cj968Ybe/pK4LtBJoaZJIYXCWeZ6YTh2r54KEn6eOAtFX/PEvv+H/86/8Df7W3/pb3N7e00+e2UdQRnwT2uLDyLE/Y02iMhCTXqTNcmM+bkkee3Z5gj+WunITaKGwEb0cBpOfmL0mxYhG4bTBGiObhpiWfz+gf5OObZSwFEgJV1Rk5bg7nPnm/TXZtNh6w/X1e4Z+RpGZp0EoSwq0Nsyz5/r6hg8fPlBVFcMoZp7Vas1ut8MYw263Zxon3r//wN3dPXf3D8ze8+HdO5y2iykLITdFAZ/IZumR8yDZjWTRqBgrVcE0jxSuICfRrVSlozt333n//UUrgRfAP7fALizwv845/5+VUv8q8M8qpf6LwK+Bf+rP+ySimpvZbq/ouwMQxCNdWdarF4yTyGS9H9luN5RV5M3LFzjrONw/SHqtkkjnwjhinFitd1TFDmdhDhPn8cyq2tCYhuvbW/pO09rn2CJyfz7he2gq+N2ffc71dcv7t3/C7//uD/l3/SO/z+3tPavG4mzJD3/4E/7Vf/VfYdVOfP7ZC3JKXF3UvHi+JYbI2D9AWlNVz7m9u8Nax8efvuHm7pqvv/6azXbFdv2ai4uGnCOn4wfWrebFy5e8ffstQ+9pmx373QXznHi4P/Hq5Qs260vu78/s1iuaesXFdsPFxZ7NVU8MkbatF16Apygsz55fLUj2gd3W4uxE3w2igUBRWMdud8GbNx89zTTmeeL29obLy5LLj17SnwemrufDt++o6h0hzuQs0tqQxFugVJJJukLasBi5/fCB5tNAtCKYHrqeP/jX/w3+b/+Pv80f/OEvefjwFf0wETKSpKuMDCyNIqfIoR+wJuM2rRwAyxVzXNgJmsIWKIXcLLYUE5hRZJ1QtiCoQJhFRelDwnslVu3lJuJRKuy9zAEeLdVakqOUEiehQkxko890Y1iSfyMqJmII/OqXf0LbtIR5ZNXUWKuZs+F0OmKt4Opc4ciz6En2+x3eS/BNWRbc397hvWcaJ5xz3NzeMnQ9RV0t2ggwiM0+BLE7K8QV6PxMURYLjSsTougmjJkpS0e1xMoXzvH+3XcX43+hQyDn/Evgr/0ZH78F/om/38+TUuLu9pa2rQl+BkSmO0/jgpYqmNTE2A+EACjLar/jw/v3HI8n6rrm5uY97WpFVTdAorCObfuSr778lofDifNw5vmLKxqz492vvqWqVkKknUZsKDBpy4cPX8r6T3l+9IOPefFsy6qxvP3myNdf/gnPn72kLhTPLtZcXPyAwgYe7g989PqK9VripC/3LW9ePuP16zd8eH9NjIEpTNxcz9SF5uPXz/j44zfE6OmHM9988yWbiy2ff/IKw7z0uCU/+tGPaeo1X/76G55dvcLamvdvbwFLWTacPn7Jfr9Dlx8Wk0zCOYP3M11/5vNPX2KM5nB4IJNpa808eZq65f7+gRgSH330MT/4wQ95eDgQgudweOCLL8TZePXJ5wQfefv1t1RW8YOPP+f5i2eSlKQNMSRsVmhtyWkWWKmXf//XX3/F678WUFZhtOVXX37NP/8v/Av8v/7GHzJ7hWMkLGKcfvb4mCgLR6oK7CJCmlNEFxZjxaeQVV70EglrzVM0nbMOZwussURELut0gVGBsRMTmoiFhLisFIsZLOJjFrVgVou6UWYFeiFWkcWEo7Th4Txw7AbQFqUNKUWePbtiHEeMilSFpSyEVoRxHI49SmXRnziJmReremQcBzabDePYS/pUVXN/f8/x/oBVmtIVGKUF9PqIBlOK2QMuYczC3ljk0nMMiBxeYYyiKA1l5XC5kNVllorgu67v
hWIQRMH3x3/0S8qqpKxqpmni/fv3aKV58+YNztWUpabvB06nB+5vrvn2m28pipJnz55xOBw5HB8Wdv8eZzUpGG4+dDw8dPRDT+0iD9df8s2v7tjuYN1YVDakyXG6H/HTQJgVNx/esdu0GJ3pu9NiWskUVvH226+Yx4711vHt22+4u5O1j8qBoTvx0etXfPzRx7x6+ZrT5x3jOPKHf/SHjMMzPvv4Na9evcCHifvzgdcvnlGYzKtXL/j49Uu64z2b9ZaiKFhVNXVV8uqFyE3rusXqhLMVxjjUmwvatuHt9S0hyF68qQuKbcvtbaA/H7m82LNZ1XjvWTc19cUlz66ecf/wAFmx3WxZ1QVxdjjX8vL5BZd72cxUF3uMdmQ/8uPPPuP5xQs0BRmY/EyIinoJXU0pkYJHzyMKuLuRSPmsND4kvn37Lb/4xS+4vr4BU7JywhEIaMZpZJg8KddYA9qJdyTkSFJIUpOW3X7+rbogLy5PHwI5q8Vg5tEWbOXEOqu0QEtgQW9njFZPKVPxSUIsO/dHKIpCiUjJGOqqwK1XHIaBYVpcq4svojCGZ1c7VIyEyjKNA5DJ2bFqazJZZiUkVq2E5Vxff6As7KJuVDR1CVloSTHM1E1NjB5lNW6xRKOWf13pKKzFR1HDOmexVok+oy7YrkqqoqCwFqMtw+CpVyWHw5HtesNbbv70jQd8Tw4BpTSvXn3ML/7wF8xTQuUHDscDMUb2+z3BQ86aoqqIc8fY9YyTsOeqqpShVyVWXj97op/pTgfm/kvmuSPGke2mwerE6XCkNECYqAp4/uw5P//5HzF0B/b7PdM08OL5c2Y/sWolJbZtaj755GP2u0u++NUvUUrRj3c83D+gUHTnjsPDgc16xZvXH+NcIUEl6xXrVUtT/1V+76/8lK47M449b999i1Wa7CN1UXKx3XF/c4dVBqcdV/vnpJR4uLvj3bt36NcilFFE6qpAqXn5f9wJY2+z4Xg8Mg0DVisu9xfiPYiJoRuFmjvMNEVDoR3PL64AuZHOh4MAK3SkbRvajz7icDgyWcVms8b9+IfsNxeo6MjRSOk6zWgrYS0ZeRrFJI5AoxRdd8YUJWEZ4A39SNf1i+GJJzFLiEHgHd5TRMGlx5TJMRJiXGTYohrUWmOCJgSezFMaMYkFIskHJj+JczQGSqMWJoIEfhqbQXnJuNAyZExZlIIiepKVICiMNcL4qwqqpsTWJa01tKsVPp+xzsoO/nRPUzfsLnbEMHN3myisYcrVk++jH3qK0kLyIkVvaxSBh/sbUvT4eaIsStqmIuWEMdDWpfAWtByIy9wSV4i2ZPKTMBlUpi0LCrPm5fM9bVVTlwVj35GjojvP4q5NAr7le30IoFnVWy52z4gxUtoGpycqZ6nciugVzpWo7ChdxcvnLxa3mF6SXcJCw4lQllgjarSg7ylrzxQ8q1VJzmeuLkvWqwusLbi8bLi8KPjVF6NYR2koi4q2abi9vcYow831Dbvtju1mi3MFCs1uu+Orb255dvWctllRFiXr1Ya2XeN94O7unpvrW9brDc+fP2O/3TGOPeO5w6B5/fwlzy8vOZ8OlBtHd+z4xS9+zk9/+ju09QarS2xh6U4DZEXhjCTROoWxnvP5CAOcTifqesN+e4FCy0Q8JaxxS4ISODPhmgKrSnabHU29JqVE153wIcjwC8l3KFzBdrslNpmqcOx3G3btFrKhP3uaeoWztYA+y2KBe6SFaCwu0Kosif7IPHn63FMXjk8++ZQf/ejHfPXub8mO38lBEFNc6L/gg2eaNIREoRM5l8QQRd9ARhmZ6MekZGPgWfIoxYEaY6YfPSl55smwW1cYV6KNA/TimwgLg9IIdTpl8U6gBD7ixImXtYhtmqambiuGHNhsVrx+85L87VsympTgYrfheDjgbGa9aiW4ZBwJWrB1Xd+zah273Y5pGtnvNux3rRwg3ZnCWurCsFnvnshR/dhjaiFHp/yY0ShzM6dljWxtYg4zsw9oAtt1w2azWijcmamXKsHtdozDwGrVEr7vZCFjDHXZ8vmnP1jw2xGyTJeneSKHSL2rqIqaXAWMTiijGOdp4QbYBdwJVhuC9zjrKCrNFDJttrStJcVIXRbUlXAK5ume+4eZ9UZyDkIwrDdbpnFgt9uLf7uqePXyFcdjzzjOvH71mv1+T9NYVqsWpTR3d3c0dSuruwWTHWOi73ti8KzahmHopMwuS5wTrNlDWZKzUIA/+ehzfucnP6Nd7TmeOuq6ZBwm9GuRDTvnqCpHWVkOR5mVKOXRGLSyrFvJUTwcD/g5UuxqMcOsNXVdYbWlsEJFHsceY0pyUoQs7MKiKJZQTI01BUVVC+Rl8epbW1DVDU29oSwLbOnQzghmzMvKMRvZsz/c3XH3/gNq95zSWF6//oh//B//x/nimyNffP2WxCQ9epaSOyaR+k7jiLJQN8WThj/lKE9rLVsCcfrJAfLoEclZE33k3M/MfqLQGWcN66YEJbkGIYoTNcOiFBTjWs7i+4BEMJmQIeRMVRas9htsoen9zGpVUdQlEJknsX2/fvOaX/7JL0lxRKFpm4IYxB05WVg1jrqp2O/3dN2Z58+vnuYab9+9JacsTENjQBna1mFPkio9+1mIQY++Ea1QyVMuDsG6KgnJEXMgTD1jB0MURWT0M9ZpCespJBw3Fu4777/vxSEActJtNjs2GzgeTzhXyYsx+2XoVWF0QVU0KOWFkxcmjBMQ6Gq1oXAOP45MQ09bN9i6ZfQJawq2m43Yeq3jeHjAFY4QA9fX79FasVo1oFp2uw3ffvsNV1eXwngrCoy1zH4CpXj2/BlN01DWhq6TtNf1ZsN2t1uchDP3D3coDU1boEwiRFEtrtdCLX7MOihLaRu0Vnzy8WdsNhtWqx3jOFMUJVVVs16vljI2UDcFVeXYrDdoI/kJlj1KWcpSUGbjKIaoVbNZtgXgbEXbtJImFAJVpXGu5Hw+4oPg0uq6wdmCYZgX2XFm6Caiz1hT07YrdrsLUlRLqYqEfcZAjhNET/IT8zRyd3vH3e09b55/AkqGcc451ts19v01KYwCXNFSySkjbAVLwmkoXUFVFLgnkpKg5FMWkvBjZoJYgRVhmhmHiXMv5e9AYNWKLkNZR/CROUbm4AkpLOIbublY2hNyxofIFDxFLmgrUQMqB01Z0a4qtvs9r19e8uH6mq7rWbcFP/jsNe/fv2e9rlEgN7JbCVD0+TNpsdoV0zQwzxMpywBXI98nlKHvRzGy6YyzGef0U/6FD/5pjakTtLUBbYlIfNqpm9Epk+YRP3usNtRFiXWWMabFMfvdQBH4nhwCKSXGcQDUkzmmaWoKV8ju2HtilKGOqxoUkZhlf1s1FVlBW7c4azmfjlhlWLdrgqm52G+wWqMzGKByjuwNTdsQUiDEd2irqcqGsl5JuovOrLcruv4MOXN3f83D4Y623bDdrTDG0axKSUrSmhADTdswjj27iy3T3DPPE7v9mrZtyDFCWujFyNPPGBhGT7tqhCxcVmRgGEXr3XUnpmlgtdqzv3jG/f0tWoFzNfvdM8pKwlaTLwH
Fer1ejCICXy2KSuTL2hFDZp4iduEKFq5g9gPOOXa7/TK1zguJWWNtQeEaQhTsttEFTbWiaVq6blxyH4AkZCdlMswJPw5LpWO5vb3lI6WlLC1rzl3P7e0d4zRRKDFKPd6ERhu0gkIbSqtwyuDQOG1wVqGM+i2LtEGrR7efJsyZ4GeGcWSYIjEpdIZ+Svio0LYkIZbfKXimKJgyltwBrRUpqgXIkZlCoEyJoq1YXWxxleaylrSk7a7l/sHjlOdq36JMZv/xC9rG8vLlq9/EmSVH3YgD1BrD/mJP15349u23aGVxhaNyz0k5kZKiH+fFoJbYbYXp8Ehz8gtPcpom1lXD88tnGOf45u1bvn7/jlVTPCHxCAGjFaWT90BZGoIPC/Tlew4VAXGlpTjzcD4yTvNT8Iiw86W819pisEQvfW/ZNBR1IYQfG8Xvr52U5sqSTMWm3VG6gqnrMVkAEbvtc4xThOh58eI1rrDMwVNUFejMdtuy3a5ABfp+QClYrSuUShSleYovu7i4QAClE85plC6wzrBafcrxdAASMY4YBXVbQVYyXbYG6xT7/UZciNZirOF0PKKsYrMVOGVRavEbaE1VNbLnzuXCmC+YhoOkLTsBjUiUWoHQZQNpYSLKJdRkpbRMlBUUhcM6caDNc1haApEYo2uGYUZbhzYV1paAoahqXFmgrFmUghM5hWVdpbBFSXfu+IN/4w/40T/072bV7mnWaz7/8Y959vwZX7+7lnI+Z0JI+JhRylBai7NWeARaY5WhtAXGRHHz5fwkGspqcQAqg9YCC/Fz4Nx7fMwUOtANnq6fl0m/IabEHIMMG7VCRYNhcSGiFuKaSHR9DFSrlo9/8CnttsY4/eS4nPojn37yiouLC7765luquuT1q59yefWclDNd3+Oyo2kb5nmi73uuLtfstxXWRBTSyoCEw9TtmqpZcXd/z/l0ktlWzqza1SJmipCEFbFvV1xdXmKLkqJQhCTqRVdUgKI79/jJY41lGKbFYRJpqoaiLL7z3vteHAI5J8apAyQauywKnFWczgfGYURrS+EkeLNQBpJCGYPVjhRgHKV/Cj5hlP1NVltRkBe5aV3XqBg53N3QtjXC5YPNtmX2I2Pf0W5bfPTsLtYYl6lqyzCKdfby2Z6hnyQNJsHYJ1zhWLUNxknmfVFaIRwZKMPSeKKwSlOXMqCqojzBxnFgu90zjWLJjWFe3G4eYy1161B6i9FuSQOyGOPQuiHPnhQcKZR4f8IY6fPFIpyX/y9IZSXf/MLJZDnMM3OKWCeZAuM4LfZVS0yB8TzStiu0Lok+UbYVxpRYU5Bixi6WXUF7RWKYCPOITgFjHefuzJe//ppx/QvOp45XLy4prOPjjz7iZz/7GX/y62/ojgPJCxPvaTfvHIV1lM5SOkPlHJV1QFwGiAgYBXhMjso5MQd5SvbDwOkc6MaZttRsN5F+nCksoLV4HVMiLf2/tAPyGskrJm1O1gqfI7YquHr5gssXW7rzgRQ8dVVxsfsRZVUxjAP399e4wvD5Z59QNQ1FVYm1egySbKQS9w8P7DbN4uqMWGvwwS9o+ETVrinqhv3FhtPxyP39PTkJy1K0C4nSSZ7Crm6oyxJXVlxcbvnBjz7n1I84W1JVLV03cHt9yzzN3N09cJpmjqcTq1X7BM39s67vxyGQEt6PdN2J9WpNXUsakPcjMYXFC51Ji4hkHmaMM1QpgdOczx1lWaC1EQOMdZDkTRPStAAmYZ4HhvGEcxHrNHMcqV2FLRKrTUm7qun6SF2v8GGmbkoSa8Zh4vJKbthpmogx0537hcU3kYnEDOeupywtKXtS9mgNVVVilWLoz+SM5AYaxzQPnM+GnBR1XQmbb73i1N9wOt8Ro6CxyrbCe8/5PFFVLQon5iBlKdya7U60+X3fEVNivV4TwhJkssBR59mL4cQsdOGc0MZSOLuQmBIpibV4nDxlWUEKhDnhNgXaFgt7YUlmnjNlVWAMArZMksYcg2c4nZinaQGYCKrMak1T1/zkJz/h9d/+u/yqv8GHhF1YCSCiH2tEzuu0EfLRIuxRC2bcaP00Xyit0H3HceR4PHI8HJkmx7kbsGqpXLQcyrBAUVm4iFpYg4/OSh4Vj8gPiSVQEjRTOKq6pC3XsHAoYkpMU+Z3fvIjjCu4vLrAFA5txT5t6rxUF4mqclRVzTyPXF7tqatSchqcbACKumVOCa0STV1Q11LVrZqWGMICxxVykc0ZnTJFJWa6V69f8827D4yT59Wrjyhtxf3dA2TFw/HIwzDy4cMHdjvhZPyP/2f/pz/z/vteHAKgaasthpEU4HToZCc7RaYxoJJDkzC6WIATiXGYOQ4dgczkZy4u9lRFyTiNdKcelTIualxTMZEZup7T4UDwE+mc5XCxin4OPH/+DB8mwqjoD4HVuiLOklWwqq9wecAP8mY+9mfp2UikOTCdE6vNCqWh8zNkxdh5+n6gdA4THdnAPMg03KkldSdphvPAarVFZS2WXWspTcu5OzFPsleuty0P5yOEJBWAn7m7fsc4TlxcXnD1YsPDw2Gh/FgJpzCOwpbM08y09JuhimzWm6VG0FhdkgtQs6XrOxSLWw9RA6ZykK2BjpTWYRMo5ZnShEfoN2bWuDGgh4nQn5iGCNWKj370Qz755CP26zUqO2Kqybbl2asrPv70Ge/frYjxjJ/TExDEFJbkFMmBqQ1FpXEm45ys8gAKZzEadJaEpMJZpmz54nbktnfMMaGcplqVFKVBG6n2HmPFSApNgaIkZzkAU1ILX9AsfoNAkcSjkZfKp90/oz+el3Wb8CPKsmW3vwSEd1CWVlqBxeVXuIJx9FT1irpqmSfFfnv5BKoty5JpGpY0qkipFVVbU1mB4EiAaIEr7BMOffYDRiHQ0Jjpu4nCaD7c31Pakh/+4IewW9PUNRcXK5KG7rMXkkwcvucxZMZYVs0OZyZSDkJ5yZqmWpHCgNEF62ZHVdV03QEfJhSG09CT4MnNVpYtcxEYZgkLSSERp5mzD5zPZ8ZxlDJMGc5DT1GUFFnR95njcUIFjx+gizPT7IUr0GiMtZzO58VKO1I5S+McU5rJPlBoA0bjbMU8ek6HgRgyq6JBpwLvJ1R2EikWJVsgJyFlWlPQdwOHw4kYM9YUhFFjVEVpC3JQEMAgII95Gjkfb/niiy/Y7ffM4VNOpzN11UiO/Ryf4KohpCdSsZ/9gtxaJuFzYPaTJBGHQFNXgABOFZpZDxSqQCdPlUFNnoBHmRltEwZFnhJ0kfBwJM8d4xw4+Uw3e1ZtTZomjC4JWaNcg6ss+4sVu/2eOEf6MEhJngXOEVUiGoVyYJ2SSiNL2yFZjUtqdfBM88TgHcfeczPAh9FQpJFNZWjaEmXESpwTkKN47VmwY1GTlBKbsdLCI0iKLHxVUogEHzBKo7PG6IJhiASvMVYSrNP5geAzRWGYxgFFhODJBG6P91hTcD6P1OWWVRsZB/E99L0YzZQyjENmGI5sVgYNhFmMWGGembzAQbURgdboZ1T2WBImGYyWIbLKUjmNXUd3PjH0HeNwIOfMetOw3wgnIzn1Xb
ff9+MQkIw9xzzPOOMWJHimqirZ/ZuC/cVeAkems6CltEw8bVlQtw3GWMqqILY1Tou2eoozYRFh5JyXF19RljIss9bSti3DMDL0A2phB87zjAK22y193yNWVi0EWoRXl2N+cq/NPhDnzDCMyw0nkVaCupKhGwoKUyyRVcMy6FwUePPMuTuTEYfgw8NBnGbW8fAgKCvh6kXGruPi8pKb21vmeearr74SpNgTM18m/fM8U5Y1j9mMIURurm8Qjbl5CtrwQZKM7AIxLQonGw8fUEkxMlOlEYJixuPLiFm5JezVM02DCF905Hw+8/7de96+e4sm8as//mMunr+RgaGz0hZUFbv1muFwJoyBFAVOSZIknUeM2uPXLGGxCEUppsUvrxknz/u7I9e3ExG1KA0DlSsorUblCEkxT+Hp+y9A2CWCXClIApZNSdgMRssNFVNkHAa895zOJ2Yv1OqiLBbGhH7CqmldMJ5PxCiJUX0/MI4dxsjhez4N3N3eE6OhLAUndnFxwbmblpxMwb5HL9yIEBNosQ+XVSXUYCXI/DAnAhHbFrSrLeOQOBw6Xrx4ASiOhwdSCsu6fbWg2mUIbJ6clv/m63txCEi8l0yuh6EXhLU2BL+UMCrT9yfO3Ynj8cD9/R3nocdVJa6qWG3WWCuDw0ykqCxTL9FbjyBGayXa+3Q6MU0zq9Watm1F2tkLvWcYzwvWSabxu90eAGMUGUeMnqpq0cqQVMA5B0ZIw+fzmX4YMFbShHOM9F2PtzMhCjsuuEwIonCxthARR0oLRdjQ9x0piXouLVPk0/lEVcka1BYOmyKb3ZZPP/sUHwL3h1tRMi7sdOfs8gaVQ1Qt/MAQAvd3t0sOgqw1UYqqqhfpdca6AusKfIgkLWTePoxYfcLgUKUmBI9TTnBfZHyUr88PHafTmQ8f3vP65Quc0fztf/1v8nt/7R/G1SVWK+qyYNXU1KVo3GdryUYUeh6ZK5DzUy6DJER7yko0ECprUlLCH8wSfHrzcMDHkqquaXzPqnZUVmPIkCJ939F1HfNCmX5kTOYs8mSd1SLKkaBbk4XXn5bvqT1oikLaK2sMx9NhSTUWDqEPMrS0tljox5mydBJyYlb0nWeapEKYJs/V1TNWm4Lz+YSxUunM88w8CyOgrksSoLxHISj8aZqWzAyFMY4YMw/3B66vr+m7iRcvXmGtoOvbVYtzlu12jfeyXnyUMH/X9b04BFJKdP1I01TEDH6U3AGFoixruv7MueuYJ8/xeODm5gMPxwOXz68whWO9aSnLUoAbKEprGacOV7c07Upy/haoZIyJYRjY7RxN03J9fb3AQh3OZNl7awtkzufT8nTV5PyI8RYzS1FJAEjICW2F8lJVUJaF2D1zJsx+SY6RN0UMCq0dq1WNtUuU+DwJ5tzJdL6qKzbbjQzVUkAZxRwmvJcBqbGK0/lIu26EIFwKHcfPctA4555SlqqqXoRWjrIsF1mxHKyPT8a6rinLUsAX1hJ8wDoL2aAwhJAY0kRdapqyZk6T5PRJCohwHzQ83AtU4+72ht//vb/K1J/56uuvebi95sXHb3BGs9ts2K5WVNZQW4d38oa2VcWUZqLyQvONsiWaVMYqGdxpZchZMc2RaQ4MHs5z4u440KVEs11xUTXsmoLaIX8vRvrzifO5x0dFTurpAFg2goBUd9IGyXuxbmo2283Te3OaJoGRGEVMgWEU2KxSinHqCTFSVkJpLjKE2ImDsCkXj0eW19JHqkZTlpmUDdM08nBzR+kcSzADrRXyNIuOQqqUgA8Bq2S1entzy93dkfu7E6vVhhiDaA/qAmNkiDqOw1N2xeNB8l3X9+IQyBmGcRYbsFKM0wQKqrrClY53H95DFqHIOI3EmGjbVgQ2KvNwfwcqUxUFTVWRoyErwWqnlJmmmZwzTdOw3e6YZ884TkzTzDCM7Pf7RQc/yRrMarSBrjuz2WwlnUiaSym9Y6JcVczBQ4qUdY0ylnk5dVWGyhVo1JLVV5CVllRhYygr2X4AfPj662W1qGjahs1my3othqB5nlmtW4ZhYO4n4hilRRk6Li4uSCmx2WzkKT+KZkA4DLI3ZolrFwVgKSGsSXIdH1uHsiwXBZ6VfbsPtKs1g09YXeBMhQkFxsq03wZJ/JUVq8YVcsD148jt7Q3T0PPq2RV//OuvCaPm7sN7Xrx5idWKzXrFbr2hcSVNURKKkq4bMMj3LmtDacU+G2LEe6hb4Tg+pkFNc6CfI/eniQ/3HafRM8VMUc/sryq2taUy4JBQ2HkUJHjKhpyWbMIcAYuzZskhWHInZKNLu1rx7NlztrsdVV2Qk4Bbc86LsCpxPg/LATEK1suVyyHq6UOPNQ5VCck5xhkwrDYr+v6BfDdijGYYBw7He+qyecoenOaZ/cUlSmuctVhXoJW4LU+He/rgxU9SFDx7/oyL/RX7iwumcTGKzSPr9ZqUxDiVF+rwb7dZf/r6//sQUEr9FMkWeLx+APy3gR3wXwKul4//t3LO/8c/73PJ03lC6wPD2DEMI26h1CgU0yQZgtYVNG1NVVWSiacSs5/op56+O7PZrDAqMY0J5yzTPHLuJ7quI8bIZrNhvV4TY+L29o559jKHcAVlWZGip2gqUlIL6ltuIjlFlej8YxCKTJSboKwrnCvxMZFnQYoVziHJN4ayqAlJE5JapsiSoqRU5tyd+PLLr9hfbLm6uqBpaqyzhBgkdsxIdl5ZVSitOZ1OWOfQ829mHSkK9jzGyGOc2uMcQL42t5TVQSLElpJYUnAFoe4XabaUsBatLaWpMMlS2JqqrCms6AMkEFigGlprCd6oSlJOfPGrX9HUJTnM3H14S7l6zv31O8LwY2wj5harRTdRaE3jCrpwYup63KrCOUPhNM4ur73WOFtitSVGz+Q9k0+MXvHu7sg3NydClkzCGDzrqqItFM6AToF5GiEJfz8uqcOCCpBAMqMlmEM8CUrWgW3FZrtlvdksceYa5yrWbcP9/T0pZ5ySmLhxHAHoup638f2y7swc52uuLq9o2y05S3s3Tz1tW9H3Z46ne8k4cA5XWMZ5wi3EZDFThUX9KdyEtITdhqrFz/MiO6+pyobVak1V1xyRNW/GojVYZzifOnKW2Vbb/iXgxXLOfwj8dXlhlQG+Af454D8P/A9zzv/9v+/PlRKHw4H7+xs2m5UIKhY4hrXy5Bz6gYeHA029Yre/pGmkTTDJU5el4J6jTLz9PElf5iwp2aeEoK7rnnLaH/v4x5SfnD3T1LHZbLi7O5LiiNaO0+nE6XRehpRx+eYPkDJVXaGsg3Gi64al7RCYZ4gRi9ycX379lq6fefb8is2mZZ6gXYsbr121NE1D3/dst2tijDwcbjmdTmy3W46nE2UpPe/sZag4e8/9w4MQZkG2HEVBURQ0TV7EPoK4eowfDyFQN9UyzBSIxjhO9ENPTpmyrJfDN9EPk6xoxyD9bila9IyXft17iBJvrq0FYxinmQ/X1+y2G26u32GJ+O7E4eY9w/lAYxq6h3vOh+OS3iNtRQ6RcejxaaZIJU6VYMuFIKyXFXFiGDr6eSTbgs4rv
rk+cneS9inNM4WGykJhElYbYvDM47QcarIeZLmx0AarNUay0ckYtHU0qxWXL57x8tVLXFGIurMsKcqCqqlwvcPP84L6Fi+DtY7dfg8o7u/vWa1W+DmLwMqWKBTbjWKeg1Rc1gglOEWqqqKpVzzcnyjLGmvdEqNeLAPGUazDs5Tyu90OloNbsGoIkdsLSn69aYGWm5trrNuJVfx8pmkaSbD+jusfVDvwTwB/knP+9eOA6t/WpWCeZ6Z54MXLZ4Dl3J/ISdJ/jXEcjodlgAfrzY6+75+eeptmTeEM/XAm5bjw1mc5LetKBms5c39/z9u3b3n+/DmbzebJoTVNE9N0FrKQsUxj4HweqaqGaZQ/U1U14zhJZFWGjGacAome3Et46GNWYPaRGCKByNgPfPP1W27vDozjxG63pqwsL80zqsrx8UcfU5aWb99+xTzPGFssIApELNL3aKXk9Rllnff4Cu+2W+qqwi46+sf+f7PZUpYV4ziJFXU5BJpV+xQ64n0gjRMh5mVIumKab5jmwDjNNK3ocxNgnMMUC8duVszDiHU9RVHjY6AbR87njrZp2K7X3F+/py5q7g8nHt6/5eHtt1jznNPDA+fjkaHr8eNMaQt0zkTvGQ4jejKEsUKvWtxKzD/zecTqxPFwB9bw/KNP+dCduDtNjEFRaotWM+vKUtmM01loO34WSpV0cU8+BbcoTa0xWJVwxqCNo2hadpeXvProDS9evpR0aR+ockEInkRiuxeEXIwJ4xxlLRSm/f5C2itn2W63bOc17aolJyPDRl1wcbEFMoXrRJ7t5ClvjKMoJP9yGmf6vqdpWoqiWKApHj/LBkch1WVVVfR9L1sLIzZuY/XTDKHve9brDdvt9t9yKAj/4A6B/yTwv/mt3/9XlVL/GeBvAP+1Py+CDES/fXF1SUoeVzqmcZbeaL8l58w3X39DyokXL1+Ss/RHYoRxOCf9+2OGXAxRtPNh4nx7w3qdxU2mFIfDA7/+9a8ZR4mNds4xDD11XXM+n0kpcTwdqMoVt3e35HRPTpo3rz/CGklzEblnJGclh8LiSV9v1otsOaGyou8HjtOBeZxp65awUXSnM+NwZrNpaeuStKoY+x6jajarNRqYhpG2blAo+k7yCoMPUtKHRbizpBe55cmRYnp6Y9R18zQHkCj037QHwQdO57MMsIpqgWo+DhItFxeXklbkHP2poywaqqqmaCq0Etuu6uD+5pbudOLF608ZJ88wzaAUn376CZC5v70nxYxDc/3Nr/njv/v/par+KmpRFhojKjqbNRrxCUzBM3YzfhwIXU/qN8x1hSWQ/cDtzTUvPnrNq9cf8yfXPyeoAu0yKcO6Kdi1BZXNGISRIBj3hPdJBoLLfWCMBqMwKmOVBKmWTc16t+Pi6ordfsdqvaJcDtecM+M4Mw4jTdtQV83Ccaip64aqqnDO0TQNH3/8Md57Lq+e03VncQxaR9/d0zSOTMTaEgEoG4bBA56iLMUBuKRNpwVP771HK+n/y6J4Souepon7+ztSeqQLGeq6xTlH13Ws15vFN1LIOvJ85ng8fuf99w8ii7AA/uPAf3P50P8E+KcRBeY/DfwPgP/Cn/H3nsJHXr244NmzC1IK9P0JpRXzshZp2xWXl1eitnMFVb3m9uZ+cbs5zuczisw4DXLTVAXaKMqq4f3791hbst9vFpdW4Ec/+oH465c0HmO0rKFKx37/EfvdJb/85a+5u70lZ0NOmpcvX/Hhw3s22y0g09Z5cTY6pSRSTGnp/Y4nrJG4K2ccQXkudjtevfqIeR45HO5wRou01sI8TqQw0zQlVhvGOUCWfL/gI2S/mIQcbbPCGktZlVLeKfBe5hWP6cXTNC4JxHHR12f2+z0oRdfJuixnIelI7qJImZu6JbjfJCBX1lA4iSbTpSPHQBpHzscjN2/f46qS3cVLymrFerPj+csXUo11R4ieuRso7Zbj3Q1//Ad/l9cfv6S9aNmsNzx7/pJ1vebm23dSvVhLpUtJXiITfeZ46NA+s2lLVNJSOlc1/TBzf+iJ2aILTQyeolZsa4vKA/M0EiOQhW+YiU9gMgWy0WAJKc+LM9VairKkrEps4ZbNh1oO04gFzqee0+lMWVaEEJ4Gq0ophqF/irM7n88UxZp5yrJlwdDUa6xxeA9Gl/h55nQaF2oyxDRgF31M07R475ecCJlZsDzhjZHk5Jzj8pSPzH7k4UEyIJqmIadMVdY4W3A6nR7vtb+cweBvXf8R4G/mnN8DPP68/M//58D/4c/6S78dPvL7P/ssu0KGaYeTxzlNzJHD8chms2Wz2TKME0VRst5s6PuRru9JUWLMY5gxVlO4CoVis9mSNVT1gapawB3R0zQVRVFweXm5bAtq2lbimeZZk1Kg63q8j6xWa/puompWFEXB7e1bYpJvfl3XHI5HrBOia1nXT1P6eRgxKNkO5MzUj5y7CVdWhNJhdEbrLPJPYyisY54HdFORFhRU3wmAcr/dihqwLGmWwU7hCtYrSSwepuHppk2JJ6VgSonT6fj0sRDC00xBqhiZWeiFY6+0JP9Ok/T8Wht2V3ucK9FFs2ghZMAVY5S5QMikpGjWWwiZ8PoNhgRxwqqMH3ooCsYxcf3Wcn/zgd3VD9lsNnz2wx/hsuJfOf0/pboqS4IH5gnjLFZLe6OVw+gS7TKXF89wpuDbb9/z/uaOkMUdGKaetixZF5kUPdF7YlRkDCihBoOkJGkeb+5HrsgiV1Ri9skLskugaRL3js/CIkySYGS05FFkpI2s61rWrUpR183T5gCk+ri/f6Bta5wrOJ87pvG3NQENPsqW4eH8QFGUuCUzU8RpAnSZpongPfuLPatVKw9AZ6RNSXEJqPXk/Dgfaokxcj7fPN6HT7OwP+v6B3EI/Kf4rVbgMXRk+e1/Avg7/1afIMRAN5xkzddUGANXz55J+ZMyWSnmEJnmnlM3UlcNIXjapmHWmru7jlW5oi1KjLOs1nvmMHNxcYE2mZvba6qq4uWrFwzDwDB2hBCpm4p21S7kHsXNzTccHt7jXMGnn37KNEbW6y0XF5fEGDkchMq7WsvTb7Vac3F5wWotPSAZkg/kEHHaoFMmx8S5H5jnGT/PrFcV49gx+5GmrNiu1jwcZoauZ+h7EhbnKmwhNKCmaiic49mz59zd3VHYgsIUjMNAiqJySznjCvsE23ic/ltr0Fpzc3PDZruhqGqUMYspRzMvIRaME8PomafAaiE2l5sNVhlylrAUs9wwVVHw4vIZSVnpD1xFWa+om1aGZdHjNKg4E4aOpBLd8cCHt9/y8ecfsd1sUZ9f8mx3wbdffsPP/+7PBaSZBQ5TFiWlsbSmoG3WVE1J61bstxW3h3t+/eVXdP2EMi3JzzgFF21FxUwKXvp/BSFCzIq8RJRrwQgv+YgKjbQrWuvF/BSXmypLyIli2ZgIrLQwIhUPwVNUJQ8PD1IROMMw9MzzzI9+9CP2ly/pz4YYPTmLWElpOWSmaaRpGub5SPCJ4BL9OLK7aMldzzRNzMxM48Q4ThRLQvaj5mQaB1arGqUfK8DEfr9nHGdBqAdZ
/WptGBZQyaNY6C9NJ7AEjvwHgf/Kb334v6eU+utIO/DFn/pvf+blg+fcSca6IJgCdf2CeZ45Hs+kCKt2Lb3ZJKrAspC1Xr30zForXFFQ1hUhZIwueP78Bcfjgb7vl4ogUVXF04T/9vaGcZQ+2lrNyxev6fuB+7ujPA13G5wraJqazz//jNvbW6ZpYrNds794SdOuqKoK4xYFXcqoAlRM5BBJw4gyju2+IvkJFrFO350Y5x7nDHG9IoeJ4+nIPI/EXLB/80JQZccz2+2W4D2ng8Ajh26iOw3kJFZmVWeGYZA9f0x0XYfWIkRarzdPA1HvvVhqU6IsqyefQYwJcqCqGlarimfPX7BqV7IKTBEfI4fzmVVdCx23qqmuntENgWkMrLBkJevIYYlvd9ZQFZZ5CvgwMZ0O3H34QPSe3W5Hudvy+vkrfvq7v88vfv4LvvjVr4SW3KwkPTkrSlfhTElVrtiuHVeXLe8+vOfu7kG4gcoS40RdFlysa2zsSYTFQaqIPhNiJmNBJZSWp7U2QvDVgFMatfw+I0/7RJbDYimhjdEYJYyGtl2hlMiuh354wr5Zu2L2E9oobm4/sFt/xDTPgGK1qpn9wFdfvUcpxYsXryiKAteUlFXF4XzL7D0XV5d0p06AsePE/d09bdMuSHWHXVucM4zTQNedGIYO6ywXlxcUhaPvZEPlXKZwivNZDibRjkhux3ddf9HcgQ64/FMf+0//2/08KcoKKATPZtMQlp1214nqSQZeK1arNVfPrjgeD+L80gqrLHUlYMfClnifiD5SNfWTBbVZ0nOPxwdevHhB2zZ89dVXfPjwjqurK+Z54nQ6sl2vePXqI07Hgb4fKFxNjPFpIGOtJkb5nBeXlzgJHRRnmA8kL5N7kxVhnBjPHSpnysKSU0BbScitqoKqtqTgUZsNhdUio51HdLFmu90JZDILM9GWhrvbO3a7HefhzOl8Yrfd4ayFJXUmhsg4joJp14Znz56xXst0uGkajBUOo7WWpmmpq4ZUSRthjGW7kfJ/tdmSY2K6eY+tK6It6PqOunQin21asAXTfGYOCYzEwocUcc7KbGW7hjlxe90zjDNKWbEvA+vNhlXzivVqy09/52d8+df/Hdzc3jLdy0GktcElReFqjClR2jKHzP39gdvbB6bRo23FOM14H6lbcXTmuUOVDokfUiQCMSliVqRFBfQba7J0APkxiXipcnLOpMVbEEIkk3BOUVSFiJqMZbVakXOS3j2MKC1eFDtrDod7/s7f+dv89MeJy6tLnNP4oBjnGW0Sxliur99RVStevXpF4SreXX/N7e0tn3/+AzJnCldQlTUpCqTliZfpRF/S92ceDvdSYSwq2O1mD2hhKE6eVbunKKQ1LopCFI/fe7KQgnkemeeJX3/5xYJHFinsRx99yjgEunOP1ha0TL5zSDRlJdbhriPGJIgoWKLIT9zfvUWpgHMF1i3ru8XJpbXsX7331HXN3e0df/Nv/Gv8/u//VVat9Pfn85nVasMvf/lLxnHgcHhgf7Fld3mx4LGU+OuVCJ5SyqiUJSdv8qQFzxW9Z/aDePajlJ2Pqx0UVGVN+Vz0+017wTSLbr4tWzncCmkBrLXkAE3ZsN1u0UYxF/LEcbZgniWTLsa0xI0JUHW324ksetEbVHWNURZtHNYUoAx1uxH0eM483N9z+uZrLl+9pL68kihwazCuYBko4KyDogJt8VNAG8PF5SVq7mEcMFHTHwMPxxMpeMZ+YOh71i8Lis0GPwdW6zWffPIZz5+/BK1FcwAUGCrlcK7EmILDwy1f3X3Fh+tbtC0Q/cCItVq4/XGmsIJHC1nckwm9fF8yKUqPb5bkIZUl2stoLa2BfsSXSf8sv+CJglxQotBPlVNZOpoGMiVdf6Lrztzf37LZbNjuNhxPD7x4eUkmkLIHEpeXe6qq4ttv35Nz4HB4ICYRnOmomecJHyN1U9PULfvtXh4sUVKVh2EgYRkneR9pLcEqXXdms97SNC3TmHi4P9I2e4qiZLvb0jQNDw8P33/ZsEJWaj7MnD6cFtljou/GBQ/unnTsd7f3zOPIN199zasXL9isBKIxzRPH7kyzXhFz5nA60Q93rFYV3gu4UynFr774AoUAQ/th4P5wz+QneVJG+NUff8HnP/gB4zDztntLVTb80R//CZeXV7Rty09+5/f4/Ce/T4yOsOCo9KPizGpUSsRZDEACv7B4PzJNnhi93Pgohm4gpbiUp5qmqRd8tqMo9CIUyaxXG3JOrJ+vefv2LUop1putbFBCYEweYwrqZo0rAttdj1aatm1pV+sn5Fhd16w2G1xVkROE2cshUNZI0kZBigNhmujPJx4OBzbPnrG2lqoopMy2lpwHwas5hykLmHqGTtqn1XrLfHpgaFp8M1NVGo2nO018/dWXvH9/w4sf/R5aW07nI+fjSQ6pzZbVekU/jnTnjgKN9ZlSGWpXcNcP/OrbazqfKNctYcpoldjWFReritpNtEVJtBbv80LREdtxTkm2A4+moSz6B200hStR1qGtXYAmkv5TFqU4TXUg5YCxhk21lpWsNrTtihgDh+M9Shnads3xdKRqaj797HP8BInENA6YRT8Qoud4krWtK0o+3HwghMRm19K0Ig0uC0keCt6z2+6Yp5nDwwNhDjw8PGB7gykUTbumXa3JOdG2DUVV0qxa+uHA3cMd2paSh7C/XB4ApRC8v+P6XhwCKWV+8YdfoJT0suNwpiprjGm4u+0EuOjFG75pdnz1xTVffPEtt+/PXD27YLdfs9o0qDny4fYdp/MBbTTrpiVOibZd01Zrgs9UruGbb7+lKDyb7UtQiZuba5p6yz/8e/8efBgoUFSrinVrGYaZH3z2Kbv9R/yVv/KP8vEnPyP5NedChj+F8rg0oZnIeiaoCdOUFEahBo+KGU9FsHuUFVS6mie0KUhpJkVxxb19P6G14eWbShRqZYVKkRxntII0zTRFgbWKOc4iE9aO2W+XoV/DeDyQVU/IiaK5wpYVZSMmobauRVMwebKXAdocZlm9lgV5GiDMqDDi6NBNQ7lak+bApqxwCXKEMSqyK9A24ownvv8FZu7Rdcs0ZFK9odpfMHQPlPbEtumIWXP99lv+4A++5Wf/zoZ2isSHEzdffs2vfv4LSm34D/z7/v38+stf86//a/8aRcys65JGG4opcThMfJlKhssdZtXS9W/ZlyM/cp4fWsWbukBFD9FQ5Cz5E0h2ITngtJbUnqxR2WIw6OyYR4MtM9rN6Dig44ROidKUlEXDEE6cxiN2grWuaIsd4ziQ7hTtaocyW5xq2OxbxqwIeRRMXtR88c1XyxO6oV6VnM8dp1OHUoaVK9hcrCUiLUwMpyM6qSUPMYHVEutmFVlncHDx4pLruzON3dFuVxSFxjpFyp4pB2oTmFTPmx8+l8/rItmseXvT052lGvyu63txCPjg8V5wYk3TsNvun6bcbdtye3vLMEzUdcPsPavNip/8zk85HO/o+o7L53vR1EcrsVLa4pzsfp21NE2DNoZp6MnZP4EaVpuWcRxkjx4z1jgaW1JVDnQk5Uy
7Sny2vuSjj3/CxeUn2EJ08qWGFAMmByxRDEjGoRYKjLIGXZXoAHUJbiV3kU6RHD0pNIRpQjQsinqaMLagWTlimMnZUxSyIsspMo6yrqyMISCMf1vWTMGJI9EV7HZGKh4Qj0Up/804KwfKOBGmER8jpigoqkoqoBCIswwujdGs24bRZ1xVLwRiJyXygrZKM6R5lMzAWSo4a0pwlqosCYW0LlVdC+i0jyhtGBdf/+nhnrfffMNXX37FzYdryrrk6uoKW1hu3r/j9OEWE+V18VEgM8F70mxg1JTGsLY1m1XFbrvFGJk7xKQWLYAi+kAI8urGRRkKsIAFF04hT6YhAZdqqcaWP6a1oSxKckpc7PcoHPcPdyJAQtOsVriipd3UnLt7zsMk6cE+4ecZ1lvqqsaHCPTsdjvadi2T/JQpC4mWGzpHCOHJ9feo+fBelIJlWdK2LZtdJCkDRIpSUxSaaY50nef29oZV2wKKrhslcCSOXF+/pa4bSWD6jut7cQgoFC9evMB7z/NnL1ivxR55Wqal1ha8fLknxUyOcHV1Kbz8psCVVuAbhyPj1JOV5tnzF4vLsMD7eaHRziQydV1xcfUZ0yRBIGSRkpyPZ1b1mhg9D8cTwzhSVhVNs+bi6iVXL9+gdEFaJKRlVuQ8ouIEMZBVAqx4CbQCY1DOQMgYPEZHiDPZS2CKwkEolt5TU80e4xzWZaZTT0ozxq4wpiAF8atX1QpTt0Q02VhMVRNiWijLGm02bLdXQELl8PQmzynhhxEVZjlcF559HDJFU0ns17IG1DlTWMNud0FRN9I/WyECZzLaWlQWqzEhPTELjULoSkZjjRB8H8VKIQSCjpz7gRgz54c7bq5vuLtdPBL7Lc16jSkcz58/J5576CdImaRBOUVTOawrUNqAMdRasW4F4BnDuNibgaiBJWwk/ptBJfwpK7HAS3+jF3hEeeWUxRzV7FB5hJSJybNerZjnzN3drZi3rObU3XN9fU1RKS73l5weOi62O3a7C+Zp4nzqSN6z3q6I3uNcwWq9FkNXnCjslnGcmGcxuymlF0q0DB1lPX3EFg2rpmT2IzHM9N4zzQMf3r8np8Tz5y/YbHbUVUHfPbBaWZ4/3/Dhw3vevT995/33vTgEANpm9SR5HYZxsbnWYgAxBmsLtNPCzJ9mRj+w2W0oqxJ0ptUtw1Az+5mqrshK4cqGjDz1i0pTlOIlKIuC29trisKw2W6YphFjDLowzKPn7nDi7u6B3e6S3eXH7C5eoVxNmgXQaZ0GeogdeZ6IkxB8c1WhVxsoapQuycmSjIJ8QtETolBunU6CHDJCkdEaSm0kGCF2ZH8gzBOTCjhXk3NBxqFci6p28tRVFqzFFQGjJxlwGYFbEj1pzqTgySmQYhDjSwqYBCElIpmyrpYM+xEVPc5o+Tt+pm62KFOIC89Eye6LUZxqj9FgGlmxGdDLiu0xITmmSEwZyQFVkkGIJqEYhwFyXlSVhu1mQ2UNp8OAQhR8QUk6dTZgC8uqKQmVIy6z/E1ZsmlqUhRqsXYanYS9J7Fdwn1ISeLRtVaoxaOfUxTkmFoqByQVKMWI97JW04MgvwoDOSYeHh5EPKVLuvOZaQrcxYgrLdpm7u/u8EEUqxebC+Zx4rCsZq0tuNjv2ay3nM+dzCxiIubAOA54L8gxrS2gF+9AKSKhxavQNA1oRdcfGIYObcD7kdu7az5cv18qwQ23t2KTL4oKY2Q2ZGzgw4d333nvfS8OAdm3Fmw2G25v7+j7ge12J44sH9FKM43SB7dt4tyfmeYJly3deGa92XBxuadqGoZxYBhGYgjMPjIvhp+qXPTvOdIPHeM0cHsjq7XbuxusseSklkmyoWr3XL34hI8+/gmb3UtyNktgpAIVIJxhOpOHkTDOjFG2AkW1wlYl2dbEaNEoAah6US2qLKGYaRqZxx5FWgxASpJw5num8z3zFAjTiCs2uGJLwGCiwlCAXQPCStAWTLFMtKMnzSJDTiFAkh9KSfJSmGaOhxNzijTrlWgRzmemvqMpjUzeUySECa0FkU5eMGpaQ4pLbx1lGKrzUxKRIeFTeMogSFkAGGiDLRQqW1brtQhxcqaw4uev65LVgtd+uLvjdDxIYk7OKKMIJpJVojQak0UeXil4vlmzaWvm6YHKKTAWhQFEOWeMgEZT8Mv0Xy2R42lpCRbtgFYolXmMg5uniXme0ZNFqwjWolLgeDxQlTV9d0uIcHn5gpgS2ii8H8kxEmdPtoa6qrh/eFgOjoLd9mIxhEWqsuLu/oHz6cx6vQZk6Lff7ynLkt1u9+QDkZbFME0Ciy2bkq4/8/Bwx/5ii3WKeR7IOQixSmU+XL/l/ftr3rx5w+R7mnPNxdWemPvvvP++F4eAWCgt3gdCiEzTzOl0JifxC2QNIQjDbhg7EiKT9FEwWnPwT+EhAp7wWOcYp8DxeGKaRtqmpyzsgtaWWKZ+6DgcDlxf37C/2HP3cEThqJsdH3/2CT/9ye/x4s1naFeRU0QXCiwkP8D5gXQ64rsJ77PYX5cSWCkt+v+shVuT8pJAO6DCTMYzdUe644EwDTK/KArR0aczce7wPoFKxODQdktEkY0DU4KtQBlyCJBHuUFzIvgJPw6QAoYoz12tgMw0Dtx+eM/xdGS121E2LRkxNeU4Y6pWyuIcIcyitM8KlOTk5UWma62VTAed0DlJ9SJSSVQKaOTGYuH+pZzR1mER1aNBeATR+6WNMaiUOB+P3N/dcjoeCfOMTQntLLhENmLM0lEOxv2q4bJt0DnSDz3WWFQCkiItWQJyOqkFBiMdmoBj5LzUSkCmjwAZRSJGT4hyaBSFZC3WZUEIgeH8IGlAc+b5i9esN62YrQrL7C05XzHN9ZJpkVivWiFPT5NkU4bI2I9YW+CnmbIsWTUt2mS67kgIYQmRlQNrnucnMVKMkdPpxIYWYzJlZTBazEPPn19xPgtQ5nC4o64LmqbkdLrneJS1pbWJ9ab67vvvL/He/vu+HnkCh8OBqlrCOu/v6fuBVbsiJVitVmitxC6MX2S/4uI6njuGacQWBcZIgk7McD53PDzcMw5nyspRlZYUA+PQ09Qtzhbc3x8ga4LPHA93tKsLPvvhR/z4p3+VF68+w5br5T0V0coTfI8fzqSHe4aHB/zg0aak2taUdY12xVISi+c+R8hxIs8zcRpJU09Ms3yOsaM/HrBGodbCtSd2+GlgnBJJZUxVgB1x7QpTlEugSiIBUSV09iJyCTPz2JHChDNqcTMmSJ6hO3P94QO3N9cUZcFqs6FoWnwQzmBd1xRliQoT0U/4scOmR9ugzCzk3lLSk6Nh6bUTiRQ8wScZlCp5CgtRJ+GDtFCla3j15iPIMPU93enEPIyonDmfjvz6iy+4ublmmkaIAbfcwNkqkkqYLEYeoxXP1y21UQydQEAjggvPXvgHaE1eDD0sPb5Wi1wamVlYa55+CB/gN+KhwlrKoiAbcWA6a5iAaZ6o6zV1XdH3J1KCslxRFgWb9YqHh5l5munOHau2Zb3ePFWxc5rRaM6nDmsc26tnOGvohhOFKxb/gWaOM8ZYSZ
hajGOFK6ARkVpVW6pyu8w5Ivvthh//8IfCt9QaV5TcG4WfJ9abNVYrrj+85/Li8jvvv+/FITB7z+FwoG0FsJFS4ng4EkNcGIBCBbbWMgWJVipKS9vWlFVNP0+C7D6dqOuW/f6CcZy4vbllnkZ8GEnZM46R7nwixcjUzqxXO2KEomiZfaJdrfnBD3/E7/zu7/Pi9Q8wuiUltZzO8iYf+xP96UDses6njjgG2tbR2oKirFBWE3NYpMMiYldpQIWA9jNxGohhRPuZSoNyBmMUFUni1CZPfxoZ54QuHc5KUGZTlujCgo7E2BOyIpOxWRJ+wzQwzR1V5bAqQQqkONEfDxwf7pjGnu12w/bykvV2h0bjJ6Ho1k2LLSzZD4RxYOpO6MWDnpbg0LywwZNMzmSgmtJi2fXkyT8O3nnMDEQptDWY5Njutrx+/Vqeag8HTscj8zSRU+J4ODD/8k+4Py56fBDtfs74HJn9RAoThS5pS8vlqiGHmaE/oxVECRUiR0mmVoInlnmAWiCiRgjUeqFUay0EX2cN2hnSQgVKMYgCMwaGqWOMicLJatdqQ1FaTt2BaQlpub0bxTg2S0UXo7wuNzc3T0YhgegaonWCn0dxPp94eHjgeHqgqssnwrZbUGplWTEMoyQgZ0Q6bONT69J15ycY62azFbtxWTIMA2VZ8PrVK/b7SyFZn8+i2vyO63txCIDYXV++fIFzYs5Yb9YUhcRzF4WAHZyzPLu6JOaBzXZHVTfSOpwtSmXpL9cNOUfO3Yn7+3uauiLGSN+PGJs4HO5Yr9ZM88R8e49WJev1mrIq+ewnn/BXfvbXefbiE4wtSUmD0uIuQyi4OcnPXjlyUeOspl7vKdq1yIKjR+cZYoaQwEc0soO3KUKMEIJ46xXUTQ050Z8lh87PEqrpyhVlvaPZXtJcXNHuNminSHnAx0gEjDWS8BsnpvFMzl78BGEmpJF57DgcruUA2GzYXLzAVS1aO1KSexm0WGKXO3gaOsbziXLJwIsxoo1eqhuBcubHyT+Pk3e1RKU/4k4UxjmKqqZdReJU8OL1a168eM7hoef+9pbTwwPzMBC953w6cR47puCfnsYg4qvR94xTj4qBQpVsqorSCBtinnrq2jLNgQKDUTK3QQl8A6UlOTmEpzWgNUrWgDk9IbyMNWRjlrZzZp5H7GTpxiMmJnwhevyyKNFW0fUnQhCrcN8LmWkcez76+CPOpxNKweF4IMYo7EHjaBphOxhjKArH+XxiGCemaUBpSVWSSrgS5P3ChBiG4YkYrRScz0dubm44Hk9sNutlAPoIlClklVyVNM2KslhR1wZnBXX+Xdf34hBwzvHTn/54QXhlbm5K9vtXOFcwDGKr7LuedtWwv2zph3vatlogDIGmKXn18jntak1VN7Ju8RPrtmW7XfNwCPTDyKptmaeGsiqpqxW3NyfKouKTT3/Eqzev+fgHr7m8fIVxpayWQHLhlVoINSI1Lcsau7+iWe0otaMqakwh68McJnKOpBhQPmBSQpHQcaZYPO0iEpEeOvqZaZEkC0NwxcXVK5rNBapsqS+fU2y2mFUDOhFDT4iepDJEhUkjfu45HW/Zrlty6EgxEOaevjsQ44wrNPWqpmxbtKmIIOtWZTC2lBI/eVSMDOczQ39mk5J8/eS/h8zzmAOoWG4ypD3IC4Q1L3ZdYwvadk2Mltgp3rx5TVWVvD2/53w6cj4emaeR4GdiCqQgsV+i7F0CG5RhmEdiipTO0haWVVXi55HT+UjKHlNoTEg4Y0E/IrfECeicw8f0tAVQKj/pA0QZvHwcyavMS9UwTwN2tiiVqCuHtlF8/zni48zkR7yPdNcdpSuo64a1W2O0aFuGcWC1Wi0Pm3m5yS1Ky5AvLMCTqiopSkeMAWsdp9OZw0EAsykJD0Ki7OUmH8eBOUj0mkJjbcnF/oIQBHt3eDjhQ6QqG3JW9J1H68TQR+r6ex5IChCTnNZaKy4udjRNLfSZBe1sjPyAKHDGsYc8UjcNlxc7ERvFhCJyebmjaUpeXD4Tt52NzL7g+ctLmtYx+8R28wxj1+z3b/jZX/uHefH8BfXaYrQEdyqlFwr0kl2XRVfijME2Ldk2KJ8wEXRM5ODJKZOI5BzIYQQ/w1JOq5whBvzY4/sOnTykgJ9HUgxUZUVRltTtJUW1xZQ15XZPsdlBUYDVpDgT44wSpYA8yuPMNJ6YxhNu3xLmAZUi09gzjsNySArPMISEs3LzJhTGlZSPT00JP8T7WUr9ha68zBVlr27MsnXIi7DG4kyBV4Y5pqdVoLEFZVXTrtcoXXKee6xSTN2JoTsz9QNj35NClNARlYkBsBJH/lhPpJSYxgkFVEVB5Rx14fBhJCRPUomQhGsQo5LoMKUWiIiAPNUkAza9BNLID9BK5hopgk6PQaeQohdnaappVxWbTUU2AWM1wzASciQpWe/1i39hGAfW6zV939N1PVOYefnyJdvdjrffviOmhLKasihxRYH3EXxYtgNwe3tL08h6vO8HrHVM0yMWTAbN0+Tx6UxVay52F0yTZEUGn3l29ZL7BzEVla5aKp4CZ2sODwcOhyNx89333vfkEMg8PNwtp55it9twPsuTX+i8BdvtejH8nFGLOCVEDzlSVwUp9czzRDYyiGsqx7Ze0587wn7P7B3rtiZxgdIFq/aSF69WvHn9E958+mMKazFqImfR9mPyUpqmRUAi+3GNRhlLtlIeEwJ5uYFQkPMMOUCcUHGWJ2myYot+uKU/3GPw1IW89NoayroSpZ8xzNHSDSOFdTSV3PzKGaIficETc0BbeULnFEhhYu7O1JXDGUXwmRgC4zhJqo4pKMqGmDW+GzDFmmwtGYV2VsQ/OsgAcxmiFc6hVZYUn0VwpBMovSC6I6BBZYvTUu4OKRP9TIgZW5TUTUtdt2gKnBm5v/nA8e6GOIndeB7HpwNFYB7LU3l5arqkCDEwjCOFc6yKinp5cgY/Y+uSOXoSihQVac54LYKrx6+DxWUnDxGN0fppFqCVxK3JWaeegCMyD5DqabttKWtFNwt0VhdCi4o50Q09zUpi2vtuoM3iWZFPl+nHgXKq6MdevBrRkya42F+QEhwejozzRF3V4vI09okA/agSHMfxSTLvvWfyA9OcKIuKohDL/DxFxtFjdEFZZEKYmeaJqtRs1jtUNvT9xPn8vV8RWpqmRCmh2mhrBfjR1MJ7X/hwq1XDPEW6bpAUnRQZh46mXaFIlIXEkjsLMc7MkxwWF7sNd4eRmDx1XVFUK5pmy7Nnn/Py5Y8o6xVpjgiBRsl+Oway+k0gRU5edtdZoSJkG6Qfn2fUHNFKXIUxzygVUETUIgryMfJwuOfh4RZLpG4Fr52CxlbFAkJV9ONIPwqDwFaaECfy3GMKRxikBdBGvPBqKc+nacKPI1cvXqCVwRqY+4mhn9HKSclZrZlCIsyBdiHrqvxYFksmYopZHJFJuHhW3DdSMCu1RG2DRvPUtmcheBhlxfI7jwQfsY8JPNZAoSiLgtPhgeF0IKfAPI74aYIFhS4jgGX1mIWdqKOEyHbnjqqo2Kw3tIXB2IzGU
LQVadbi5IxLUaQCTmnZnKS0FGFCWzKLU/A3lYDs4PWSRWiNUJMfSx9rwFqF9wPn7oiNIrG2sv2lnwYu6hpbOtwyc0g5YwuHnxRfff2VQGG16D9iSlgn/zZjDcYZzueOwhU0TfN0w4N8T/XSzlhr8f43aPxzd8/Qe1btht32Eq0ttzcH6romBXkIKBxVWaPQrNdb1qcz7z98+O777+/nJlVK/S+A/xjwIef8e8vHLpDcgc8QeMg/lXO+VzId+h8B/1GgB/5zOee/+ed+fq1o2lqm3EHQzJeXe+CRntrRnTv2F3vWq4YUe4ZhIEYvWXVKJuUSihNkp82SL4elqSqGqZCQSmTg4sqSqxcvqVcbcjZPJXGKkZAC2IxywnQjpeWA0hA1cZ4YpzvG7gyjx2XZ8yssIc1kHbFL/59jpOsHzsMZ7TSbzZq6sMR5JC+GKayl63qO44BzhnZToRxMoafWK3IS6o+rhDbEMtRCKaZuJPpE3axhlqATHzJj77m4uMRojXEtOUyiTixKtCvRGVJOchPI8Yf3sjIsikKEVdEvwzUnVRGip09ZCdA1RaKXNGNblKg54OOM9xLPFWJEG0vTVJw8zNNAjhJhnnMii5yQlDNZS0S6UpCN9Mnd+UwIkf16Tds0WBUJcSSqjCodlbOEfsZPkdo4ks7MISxrTNlsaK2f/u0ZYS/kLAeDXjImH8NX5BCQV2OeR7oukc0ISta1KXrxP6REIjGHmSJ6XFkwec/Qn6RNcoJ+M85ycXVF3/UYZ6nbhmEaKVyBcZY5ePqhh5Tp+55Xr15hjOH+/h4hXFfs9ntSStzf3ZGyJuWZeZI0ZWtLSlfjp0xZ1JAFcSbMikYkyMhQsa6+Wyfw3SmFf+/1vwT+w3/qY/8N4F/KOf8Y+JeW34MwB3+8/PgvI+DRP/d6jGs2VnM8PTD7CWuFoTaOPX0vCr/D4YHDwwOHh3uhrcZIDJ7z6QAp4JwBEsfDA935SGEN0c8oEptVuwBAJenn4vKS9XpHygofMygHWZKMU4g8puHG5AlxJqWwlIswdgO377/hdP+B5EesRXb9v8WmCznjU6KfZsZ5oqgL9s8uaNYrsoGowFYlpq4YY+Q4DCRjqNY12EjIE6YAZRUhSN6AQCcVcY7kAOiC8dTLUz0vqbrZEJMmRmjqNcpUKF2icJiiEuiGMkhPsfzaWKSzCRhtKMpSpMbjSJjF9IOS6kMBOWWCj0QfZcCYJevRlSUpwzALLdqHQFUWtG1DUxVMfUeOUm1opZYQ0ix6/hRFSZniU3TWvFhqnz27WijPgcnPhBzBGJrNGm0sfvIQxQAUfBAl32OiEPKETik/MftSijK/WeaPPAaToJ5gnuPYczwdSFlmKplEPw48HB9AK66eXy0y3n5pPSQ0pGqkRbi4umR/ccFqtaJdryWlSmuGceTcd/gQyCRCDByPR87nMyCGudVqhbVWYKZWUqK22y3OFaxXGy72l1zsLtEYxnGmbdZYU2K1o3AlKYjHxs8Td3e3wh9Mf0Ercc75/66U+uxPffifBP6x5df/DPAvA//15eP/qyy1zf9bKbX7U9zBf/PnT3A6j7iioB9Ff308H7HGieJqnCmqin7oGIbI/d0dIQb2F1tyjmSVKXKkMCJQmYOXia9LJD8RvcKWFcl7snK8efMjXr78BGdKxs6TzQyFgtCTZ9HhG6yUyBlyEK03CtI8MY89eE1drES/4ERuqwgUOsiQMwb8PBHijK0cla2etPnjHCiKCls4MgrvE8En2tUKra2sQ2tLUVqm/sQce8KcyLEmeOkRy6pCJRi7Ufh+HonZ0gblGkzZoosGbSK4kqJYQBsxkuPyNMyi/X8U1MzzhNFgNUzHW4oUcfUKlZslwAVA5MA+BAqTyHjU8gR3Swsw9xOzj4SsicriyooiZbrjNTrV2HGkzgqvBJeelEIbi1ZGBpcGkk2s9jW7q4LCavxRAkoyCZLCKEPlamY30uWOoBPayoFgY3wKXSHJ10vOpKwJGXQ2RLVIonOWuULWaJTceFpeE43BqAqna9FHGQ0W1pUQgZPy4ltpRKSmcs16XaNNzbrZ0lQN4zCQY2T0ZxSIzDcpSThqV9SuJCyBJjFFSl2w3e3ouw5tNEPfLcnYEnbarnbkqDGmxE9yYysNp+M9IcycTqK6dcYx+Zn3H77BWQkz+QsdAt9xvfitG/sd8GL59Rvgq9/6c18vH/vuQyArplHx4fqOTEJbRcieh7szd7cPeB/Yuy0+BZyrwK0xLjBG0UtXpWOMM/3DJJPXLDHQ89BhciTMAR8NPtYUqy37y89pmueooClixDCippHYvSXMCV2tMFWNzsXyBll2/zmQ/IBVicvtJxSVRdtISB2JGZs9Jgrw0vuJ5APaWoq6gZgJPpBjImXJuU8hEbzHDwGXLZUqSD6hy5qyaAjTTNcdSbqgadcUNuJDIMRApUuin0hzor1YkaMmRA3aoqsVtt2RygoVA8rZBcYRfoPU8kGefCh0VqiYCNOE0ZkcR/oPX+JyFAR3mMm6FKuuUSRj8CrgTCD6M9aI2i5HT2ENU0zMAeZseRg9HjAuM5zfoXpN1XfklJi1Iy1yfkWB0Zay1JgioWrLs6sdznQc768Jflhcio65T5iocEGgI31lmMol/m02JDJaI0yHlBefvlQzSSmCEucpRqONIWqLUWZRREoWQV23bDZbGS4Gx/P9jt6cmcuBdVvx7duviFPPxcUFl/uWECJGWYyaaYorvPaooAlTAJ15++3XrDZbnC3wPrFRO5wtiClSVI7G1BirSQgCrlyi3WIMdMNZDtnSsd5e4WdFCgo/95JnkCf68R5nNOMwkqPh+sMHsvEMw4FRO5z9S5YN55yzUuq7Dct/xvXbuQNXF1t8iNwfbzFWnvAxRm5ubnh4OLHZbBnHgRCFs37/cBINdTCs1pJk3J+HBbLp2G4vUFh8CJgM3ge63lNsdvzgBz9jvb7EmhIVM9omCBO+65jP94RsKF0p7jmr0BiSySgiaepJKVA2Na7ZonIg5Q6LvIHy5AnDmbh49pW1GO2Is1QmOSbGYaIuSyEFIwi14/FEYZZE4CUGO8dEPwyMw0S1LlitW4zKzCkQgwcyyc80TbMkMidmHzBG4sWLukZbJ+tJLZPxx/xEEfdIbqI1BWZRAYq+XoJb+vOBarWlSYHsPVEpPEaAHNZgjMSbhxBkqEaWrIjF9TZ7T0hZXv8lAFVmGx5nE6MKQKKqHcTMTFx680xVGarWQR7x5wmVA+SAUm4Bqg5kpTBKCzquLFCFBLJY5zBG/As5LwpBZ2TmuKxGRfoldCHr5O+ljHg/tKZpJOOvbdbYqsBYy7rZkIO8RnVVs11vIScudnuUUlRlSXfuKeqGw0OHc5YUJ1KEqigJPnM+dbx8uaXvT5yOnbgSyVRlhVkYF94HycXM+UkvUBYFZVVSVBVKOVIMWFvSthZjEnrhHBqlqCrF0AXmyaMrz+XVBSlpEb59x/UXOQTeP5b5SqlXwOP48Rvg49/6cx8tH/t7rt/OHfjs4xf59uaWsgHvPXd3dzw8dE8QCmMMIUq+
/M31LR8+3FHXjq1qaNuaafAcjmcUmtV2Q1Wu8HPk1A2EaWZKimqz56PnH/Hy5cdyKoa0aPtnwtDTH+7x0xllG0yYcWFGuWKRvkoIxDiNspJsanRhiZMk3ioCOczEYWDuexkYKkW1XqNSYhw7WenMnnkc2bYrwjwTk8SfT9NE0VpSihROCLMxZoL3GKVo6xpbVMRxJnu5KUmR5D2rVYu1hmU5JY7BolhItfKkYVH2ycz20TefpTdexDkqS6aBigUxytfl/cRSLMtMREu7JXODijAMIqpKEh1utGUez3Tn7in5SDwEkteQyITsaS9qZhUhKuqyRo+ePI8Y4zFq2ecnRYgDfh4XY5Z60iuEGBZpssKVBVXd4JzBoHAuYjRoY0nL12yMhKnmJW2AZZCMyigjDsmsWPrvmqZd0TQrmXPYErRkCUoRpRb69Za+77FW0oOqtUjMy7Ihlo66qZimAbJHq4Km3lBWFUZXaDWhlRNLeJrZbNZorRl6SRY2Wi/gm5kUI0VRUlc17WqND3Duhv8fc38Wa2ma7vlBv3f6pjXuMSIyIqfKqjqzj3vAlhhsgy9AlgAZIWRuAMMFluw7JKQWSCAsSyCmGyQuEFwgAZYlS2AhLjDiwjR22+52d5+xhqzMrMzImPa0pm98Jy6eb0eV8ck+h3P6oFpSKiN2rFg79t7rHZ7n+f9/fwyR9WrFclEyjnsWzYqcIqVbUthI09TcHl9SVTWLZsPu4S8ngejfAP7rwP94/v//5Zc+/i8ppf5V4B8H9v+gfgBIPTqMPdppFkvJZn88YapKE4LHOnkDn07tnMba0NSVjAy7I8djy2q1wehCPnbqCYcjh4cduIp/5JPf5ONPf42yWEBSci0OgTh19O0DXXuPzgGtvAh/op9VayJM92FknAYKlVFWiSkoTuTkSWki9D2+axn7lvZ4nBNtasYpEBNkYxm7VuSqZJTKdH3L6XiQN7DVFGVB4ax0n2PCaSMOQ+dIg8STh5iJkyf0PWGaZNSYAgY3C6vAKE1TNxLpnaIs/CybaI4B7SR+HcQzL5E9YvxJSs+GIWEipuhFL5BlcYYYUAYKV5J9QTSCfDdaU5cV76aJ/X6PUhpXlkxTIOSECgFbZDCwvloQbKJKGkxFOmVSrwipJ/hIe5hojxPWZPI0Se2emOXLsnkZKwvXOkvd1KQcMVmRrBUdiTZom/GTRI9lRCGplJbsAYXkVkotMluPRdWntaWpF2hrOeyPjN5T1ZIfqJVlmiJF0eBsRd97QvDUdUYrsTNvNmcsmppu6NjvQSnL5cUTmsWSlOH8rGCzPsPYgr59kLJMiQXZzCNNHgnWKVGVmnEYKcsFSjuaasE0RPpuJEWPNomqXsg0xUu/5+Lymtbf40NktVqx352+c/39WUeE/yekCXiplHoJ/A/mxf+vKaX+W8DPgf/K/PT/GzIe/BwZEf7zf9rra6N59uwpP//mJ6w3TynLknGMxPniNk0jSlse5/hPrp+y2iyoKs0wtvjgWSwgRei6gbLQTFOk60dOw8QHVy/45LPfYL15AtmiAW0SmR4/tYzDjmk8oLPCoCnmiYIyzC45CUuN2aMLQ0qeOA0olQRDPmaZeYdAPweNFK4gTp5x8hTVAnxk6gdWq6XcQEKadeFa9P5aUy8X5AzTOEkasDYi0BlGwugJs0Emh4hvW9HrWyumFOcwtsTHSB4nqXlRaG2lSa4UkVEWEBljpNQxOb33MeSsmHwg+IDSmRgnxu6E0xW5WJLnrn2KeSY6VWg9yOefJwXBB7nSzuiy4dSSFfhZ6pwdKJcoVprSlEwBbEjoEEhDT3c6EvwJayKF01glw1bBpYuGQ2lZ2CnLFMCW4rh7RHCnmKQEMm5ON1bvFzqzKMdYIw7sLO8yjCFrTVaayQeOp5bFakkMYggypqAqS1L2tKcDerVgtTpnHCQl+j7t8VOi7ycuNwWFK7HOSZDI2FMWNVXRgDKsVyVFIcAcpxPTKFkDdqYtT9M0U6nlBlU4xziNDEOPNklGjECKkWmcGKcDxiQOhyNhyoQJDseWru+YpsA4TlT1XzCVOOf8X/2OP/qn/4TnZuBf/LO87i/9JYzVuEKy3e4fHni4P3B58RTvw1zTRZSBq+srtptLYhoJMUpNXImgYvewn8dVcqVNGM4un/Ibv/1XePL8E4gGXTboGICJnERjH6YTYWqJE7iYqRZr8dUnT8qQkpfmoFOYwjCFkcl76sKglTTEHmfeYhopqKsKlTImZgqlGbqe5AOFLYghvnfQLZbSadYzCzCME3Ge1xuliT4yhBPGFihjST5Qu5I8SbS3cyV9P83kYnHB+flzmpgAhS4KFAZt+lkBKSXCLzP29ExwSoj3qagKlIb2dKRxC8pyKeNPHwhEmqpBKUvG4iMoH+YutqKqatQsTxYzz9yHUCK0aceWaBOurMhDQNlAyiMxjZyOD+TYsVnVpMnj1S8mGczTCWct2om6zoco9bwRO7HWWizQIIusKAnBz5J0wZprI6M3bbWExjxqhpVBGccUMvcPe4qqZr1aM3gxsfkgITfTGEgLJS7UAG07MQ6S8TgOgbfDa8GPb7Zst1umSaYrbS+EaVcIEakdRowS9F3X9cIw0IpxmohBbldVKbmTVVWTlaLtelIYKYslZelIKdOPwnZwzlEWBeOQ8GFisVhS18xS5F9xF2HKibc3b7l+co0rHW9u3kr9pg3KZkJKqJSoSsfHn3xIjgW3d0f82KN0KbBLP+EKS1UWPOKiVpstH334KZ/94DeoqiV+UpikyTFD9DNc02NMwNqEjhZjNM4ZtE7kaZCZs5WPZe2whaPzHcpmlE7EYWRsO0I/yNw8w6JpKIqK/tSikiL5wGG3lzFbjIT5RBV+XMIj2QfTOOIHYethrCy4mMjzZmCdoz91XFxfEcIkSTwaQhjJvcYkSNmSgGQiSUsdq4zF2Fkbn9NM/3lcWHoWQ2Wsc1hXklxBYwuU1XTdEdtsWRhDePyBZY3WVkRZs9bAaEfKCmMdRV2LXl5pVps14wzICElGapEAzoIFU1psZdBWNtvoJwqjKa0TQlKa1z4SN46WE/wxK2AaJ6Eru0LgKMbNTUExEVV1RdsldEyoOaZN4IIaZTXGivEILbkKMUOYYbWPO2TXduyPB7qu5dmzpxRFTVXWaCXSXD8JlnwaPdvNGc6UAJzavcTcWcU4ikfkcDoRY2KxWos2IAriPZPmgFyHs2buBTixQsfMYrHEp4mub1Eqo7XYibv+SFkp6kVJJjKNgTzfno0tUEnWT12571x/vxKbgHDxKxbLZg7pSHz6yfdoW09RFHTdCZMUYDAGju0JbTLLqsI6xf5w4ng8cn52SVUXxACXF2esl+d87we/zubsEq0MZdVAhBTSe82/MVAWFtUUqKLBlA3NqkFZSwgDMWWcqygLQ0pO3HsKqqbAxMgwDUxdR/bS+Q4xYYqSjKI7dVS2oEsdh8OR8/Nz2SiQhlVZVOz2O4wxVHXN4XggTxGjDDpm8RKME0obfOhQWnM8tpxvN6K7N4E4WoauJakJVyVM0YAVyW+OiWn0cjV
GUpDSewSY0IJyDKRxkDBUpTG2QJsCU8qinvwos23ZWrDGzNdnN7cMFdo4XKUovceV3Wy+kpl71dRwOmGc5Xg8onIiGyPaCi0jvyYuOJ1aDnuFNY6q0OSkMLp4L+QhIUk+RSENvbmhGWMgpCQTEuRGoLIiEUX9WJb0w0B6NIUpM1ugE2BQxoiAyjjKeoFxJZMXutXD/Y7FshYNxThyOByEiuw0fS/Aj9VKEHibzYrT6cTl5SXEwO6w5/7+Hm2uKcqKGCe0kY2ubfcslzVlpfGDlDBaG4mEQ1yVj47HaeYBWFsQUoufTkwTgs+bPNZlzs7XLFcVOwt95zk7X7JYLjl1DxwOBxSZYfhLiiH7h/UIITCMPYdDJCQJEu2HgdOp5+zsgovLS1LqaRaON29foShARawr0CYjm3mm7Y5SY9ua7faCFx9+zNWTZ7iqmUnFI0Y7yDJH9jEIxkkpClfQ9QFbKUzhZmhFhy5KlK2lY58Uh9t3OKcEwtFPcnqPE4uy4HQ6ECO4qpHGlLa4ouJuf8CHyHp7RlEU7A8CUIlZXGIoTdv2TGPEKjULdzzX19e0xzeM00SzWHCYmXW+a9HWMPYtWSty8EzRkzHUtiR7T7YBn6A9HvFhwl2eEWIgxQmXpfmao2A7U5TEWuuclAVzEGaIsNE1wXvCOFCsV0xegKnEyDgFhkk882IE0tRNg9ISGV4VgsYqZ6aDtZbd/R1+mlhvHCmNrBY1H7y4ZhwCu7sddbPAad5nLeZH8IGFPHnqqiLMzc6ubUFpCuvou0EckVpIx4+hqzFFrHWgEtoarJPNLaQonf65b+LKCltWGOtIGfw8DbJOc3a+wfUWYzXWWlbLJa4QX8b9/T0hTNzdycL92c9+xrKRxKD9w55hPPHk+plsVEVNjhMeD0pMZNlrXnz4EXd3N3TdkfXmnGkaJENw6ORmUBTsdnuU7ciMHI5HjJLcyM1mibGBV6+/4vXrt5xtr9Da8vXXN/gw0iyWBJ9YLpffuf5+JTaBnLNAPnYtzknwY1GWbLYVMWXqwnJqJ/ox0vUD45BxhSYmifUexp6qLpkGz83NDU294unTD9ienVHWi9nnnslIt1WXhniamKaRRwXcNAVs0VAvGrL3jNNARs0ClAmMot3tGLqeYrsgK6m76rJkeXHF4UGyEZrljO7qR8pmiUlKTo2ixBjLODcL6wb6bqDrB8qyfE+giTlj5pHY6XSi76Weq8uSh+BnutHpfcTYEIOAUaulZAY2K5arFTFH4dq1R/rTgeXCCSs0CZlGZSQLcQZrKBwpSE1cVDUhJJS2OGtJSc36WqErWSNJyD4ErCuorZLmoDEUdUOzWpPblpRlHGeNnQk/iW6m5RjTY21FcVGz2mxZrtdCONKaPI1oImjYbhsWywUvX32LK53cvOZGsUWBVnN894AuC6qimOX/UnLlnKVJqsRQlHOmKCsKowWMqswv/tMWZYtZKh6xrsZqgXluzpZcPTnndOroh46r6494eHjgeDwSwoS1Al7ZH/a8DnfUdcPu4cCZuuSblyNlWbPZnjH5ga7r+dnU8fyD53ifGMaCxXJJs6wIcWQKE7vDjrbtubp8wursjNOh5f7+DcYG/Nhx7I+sNw3ea7746me8ffuK7dkZ/dDyzcs31LVEmG+2Z0xTz49//JPvXH+/IptAoqpLxsnLrWAYqcpI06z50Y9+zEcff8BqtZpxyx3b9TXH0463b+/ROnFqTxRFyaJZyzWrXKCVoSqbOYxD3Hxa6Vl4okhR5rH1Yk30mhwVhV1RrFaEydMPI8VihXGWGDwqKXb3D+QcMHpNCB6jxXjSd3tev3zFkD3nz5+BdQyhw5U1w/7EGCLnV5cUTcN4PJKVRlvHMHmUMiLwsZK/mKKEnpRFwel0wvuJZdOI9LRtCclTWf2e4zcoAZp+sNpye/uA0pazi0umUYCqYZLI82k4o6jNTEcSWpBSWUaWWrwHYT7NjasFgaakWaiS6PxJ4vqzzhFTwMdAXRVUVcmwfyBrTdk0VAsJick541yBVoopT0zDSN/26KwZuoGiNKisKV3FZnPGxeU1Oif293cYldE5sT5b8vGnH3HzcE8CukHKRenlWKxxZDRxDHgg1RWFcXMfIaJmf4LSCuEiznTrqkKnhLJy81GmQBmHdg4TEtaKhFkbRVUXtN2Bu3vRraQI3//B98kklsuG5fISV1hOpyOT7/n25S3aXHBz+5rDYU/MRoJuTyfKsubsbIMPnpQCwzjRtgPWKUKcmKZOyoacKMqCfhzZPTy8pwk3TcnzFx/QdZ7D4cDv/8Hfw4eOuilYb9Z0rWe323F+cSWbXVXTj56H/e4719+vxCaglOLDD19wPD1wc/OW/X5P8HB+bqjrmsNhT1YFxmaKoqQsa97dvOPdzR1lKWq1srDU1ZLt9or16pynTz+kaZaQIXgJ7ZQTL0sDSSdUXWASxFhg3ZJ6swVXEIdWBDHOoXOW9KJxJHovxg5liQjK248Tu5sHdvcHzp5fU6+2dMETjaUuNIfxAVOULDZbsrEkZdCuJCQYfaRqljjrxA4bRbATgwelJE4c6ZkcjjvGoaPvO2onzMUYE9FU7A5Hzs4uyN7z5uU3PHvyAVkb/DAK09APJN+TioIQPTYFrHGza1ba7jGmOcseyOBMScyzUSjLKU6MGCW4bu+lAWWLAlM4fAasQ1uxKmM0BlHXKdLMEOjJMeCKBcx5B217oh966sWCyyfXWKXo+o7SGqqqIOSBs6sr1tsNU/CUucT7wNDLpmJNSVk4dJ676jGhq7nzjzSUT6eTNBRn5FgIER0TdbMgawtFIRuBNhgreoDlqsIVFmsVTW1597Dj1atXrFZbrHF88/WX3N/fi9uvdrS7A5MfMFbz4oNnnJ9fEXzi7ZtbnCtE2ahlZGmMo6pqpnHi9u6O42ng2bMrQhy5f7ijbaX3cHX1lHGM7I4HUXYaQ3toMbahcCXtqccYx3pzhQ8D7ann449/AGqBD4nLqzXWWs7PL/id3/lHvnP9/UpsAimn951QGTNpiqJgGkcWiwXDeOR4HChKhVIN45BYLc4AxTgKQHOxWHJx/oQXzz/l/PwJZ9tLXFGTYyQFYbZhLBBRKqKdIkyJ9tQRRk9R1qiyIvW9aPPrRq6944RaLPD7A6UtUFoJoaeyTIcToRtwxnJ5dsnFtaDJumPPYrnG+oxyJauqxJQV3eQJgCsrhtETQmK5XOPHidOpYxgmlpdnhEnj/cQwTXjvGaaeruswRon0dhpompJh6NCFYex63nzzLeuzC3b7A3fvXrNebxm6jmk4olIk9CdcufwFLIRIjJ5HQFgKMudHKXyUn0fX9XR9QBcL0danPMuFBZElEWcanyJYg9FSk4unW6Pn7jwpMqeQUBiLm1WEU5g47B84tQeaZsn2Yiv8xixJvOfrFW/ePOBDYHO25ebuhrIq8MHT9wN931G4mkW5onIlp7GVkir9IlNAz+pGHx7dhcIa8D7hlAXtpBFqSrQR9WFVaaoClguHM4lTe6RpHE+fXZGzNCy/eflz6dhPI1134utvvmK9XrBer1
jVH1JXC9arc75cfktRNMRkePLkKS9fvuKrr37MkyfXuEISlVxREMlszrZk7Rmmln7sGaeR1focZ2vGwXPz+p6bN98Qg6aoVhRFzaJes2hKqvpKSMuq4Mn1Cx52O4q5BA0+Udf1d66/X4lNYBxHfvz5TzieHtjvH/jww0+5fvaU3UNLiJ5Ipu9ODPcntosPKI1hvbpksVjRdgfquqYsKpp6y3Zzxfn2CUWxQCG3hJwjKQdU0OQcsAq5egXP6BOlW9AsRcp7f3ePKUrKWmrulDM2BGHFF6XYnic5SZTSs602sF5taRYrHrqePkSWVcUURoFtrlfYqhHxR9lQFyWnwxFb1KxXZxzTgXEYyEkSlhSZ9nRiGAb6thVFoRZ9+lSWkCN+GvHTiMNTWcvbV684O7/gfLNhaE+slgvC1JH8gDGK6AfIDdba2WMfmMYRZzTOSBxaIqO04NyCT0yDJ/qMq6x00Yki2/UenyK2sCgr8lZTVmQvzH4zC2UKY+bRnZd/v3PUZUkcPTGIZXjoTwz9ie3ZBlctmYYR5RT9NGLqkikn3j3cYQo3ZyhAWRY0dU0KHTpDYSyrZkEfR+lV+ImsZwCSgqauOfU9YRKOgZr9FNMQ0KXBFWK8ykrjioK6LkihpSgdOo/s9vcE7VksK+pqiVKGvpvYbLfsd3sWywpjwBUG6wQR9urbGzbrC5ypWC3O+Oqrb/CTYNCmITH0gWlKbC4uqZaGrjtwdrlhc3YGGk7tid3xQFaOGFv8lNC6pCyWvL69Zb1ecH15xRQG/OT57LNPMNZxe3ekKtc8uXqBjwfqeoGfeo7Hv6Bi8C/7kXPm/v6eaepm0UbmdDyRM1xdXfHzr3ccDgfGsWVVwuk4cti3aJMx1hCDxqM4Hkbub1sWdRQDjdbkGfGVcyTGCdLEkCJTP5ETVPWCwtZobdnv7tgfT6zWhil4TAatLd39AylEqqIUKkxpmeKJMA60pxPtseP88oqYNad+oFgI3dVPXsw8TYWpKpSXN3+9WRNiZOk9pqmpvKcsCvqum70KRjrgSuFjJMVEXVUMbZT8upxoT6f3yOymqjgcO25v3nF+/XRm9nmCH4hhFBNU9ECaT0dhAsToyQEKJzPxPPP5jS3ww4jSmrKyOOek0TaKki1GKQWMlSbcOAewhnHAuZKiLLCFoy4KCVWZMs7I6xTWMk3zNCNlgh9ouwM+XFA1DTFqqkVFP/UoK9TiV2/fYJWirGvag8SgV2VFmuR2EbynsAVlUcBsGMs6Sz6hkhBVa630PObZv1KPpiENyshEwAf6fqQsNSpH+r4lh56YPFMaMMngCsPQe4xVtO2Rl9/+nMPxjpvbt1gH49gxnloe7vY8NB1alXRt5t3bB/LbPZ98+ilPnz6Xen8QJqGrK7wPvHv3lpQmhrGfYaKecUwMXUBrx4urNc8/+JjSnbFcnvPRx59ineHd3WuOx4Ht+QLnFuRkubvf0w3vaI4tm82W9XbznevvV2ITKMuSy6tLprHj1B7Z7fbc3R74/me/IaqwyXN9dc39ww1KCUjhdDqyWtWCGE+a29sdRjdU1ZKqXKICJAJyHjDPXZN09aeBYRgoTYNzBXEKHLp2zpgbWaY0U3ZEl7B7eKCuKlxR4OoaKoufWm5vbjm9u6XRlsV2yzijslfbM5ntx0RVlKLFV8ImJEiJYFxJUTWA5CwWpcWae2JIaGeElluIH6CqxGU2DCMgHf6h7zg/27JoGnLbs16tePXyW9bbc8pmwdC19G0rNJ/sSNELaQeBZ8hD2AQplDIqzOIaEGFPwlmhEGENYRrxBOKcnMss2NGPISFkJu8pywLrLMZYqqoWpV0MwtBDYbVGFZZhGlFkQvR0/YlTeyBrAXPYwhJyIOSAKxwPDzsWTUVd1+x3O5wWA5BSmmEcMIcDdbWUZinMxig9p6hlhmEQEVOhmcIjwFNCXo2RXkBOMI3SbCuLzHKhORyOpNBiysjZ+ZbNdsvp1HNzc8fp2PHhRx+z2z1w8+4tw9Tx8Ucfslw33L56x3I5G9k83NzckSIsVitiBIX4D5QSwdU4jdze3/LN733JxeWasnR0fc9qsWa1ctSLhnEIfP7TL3jx5Am/8zu/S98Ffv7VS2ypcYVi8h7jOk7HAUXkYX/P6B/4/Kc/45NPv8dy+d2k0V+RTaBh7BqOx4mpLzAKqqqgfdhzf/NOhB25pq7O0A6KasTWiZwPTGmibx3Wrrm++h6XFx9R1VsxjZggoZjZo7PB5ozykLqB2B/JeU/UWkQ1w8ju5p4QAsVZQxU1qWs5TAGdMq5YYuoGVTYyWjtODIcTeVlh1xd0y5pu9NT1BUu1YeoGbHGGNxHrLBZLQmFLh46QsmZ1fo1PimBKumkiVgt6NNtmCXZPXWvubu/xMbOwFeMoYRN13XAcOrZmCVrT9h1Pnj7l62++5e3L1/zgh79Of+yJnZRAKWeMqlGpQbNAZS3GqOAJ/YG0sHJjKEQ8E1KiVY6irLHW4WMkDEeKsibHzNT24k5bL4hdj98fcFqzKgsRHoWAMwaMxlYVh/bEpBW5rpnajnEMqKqhnK/fQzdyOpwIU8I6ByFTu4bu2KOUYZoCTnv8EClsTX/qyFiUU8Q4cVQZrxLNwqGSJ8WJFBI+SmJS1ogxx9Uko/HKEExB4cQPYGwBWaOSBi/X9bSoiLZGlwUpHWnvDugxsT+0pH4iD4F+13O1/oCz80tCAqdqxqNC6ydkDK5cU9SAdpxfKU5tyxdf/QGr5ZqyrKnqkqfX55hCkcYlsX9Ke+w5vms5255RpjXrYgtkjqd7bu5vqBrLZvct5MQwvWE4ilR7c36GcQts43Glo7254dzV/N7nX5DajhcvXnzn+vuV2ATE799yf/dAYTWrRc16ueJ0OpBU4nDYceqOEiyxtiQ76+RzQKVAVS3Zrq9ZLtcCnIwRU1rRrs+wSZMNmjg76eL7TrdOif1hx/39Dh+1ZLdpiH5knAKHU0+1XFNVBQpRaqUoCUN1WeMKhVs0mLKkcZIRqJISA6I2knFfFTirGFMSr/0wMo0ToLHGMM7a8bJpMDpxOJ7mHsXw3tI6jmI9ljAUjfeJmBTaWuH7KcN2Tl5q25aUpRYeJ/EodG1HsQlgZENU2ZP8RPATOXq5JCslgM4kslZBjonL7lFl18+5emTpvOcwcwJTQCGk4+gldBWYGftxTpa2uKIQrsEcExZjJA8jx/0BP3mWKzmxjJYMAO+9jHZnK3FZlExmoh9HYtYY55hiImRJQtY5kdXjOFA4CTlEYp6k+WdL0HbOGWBmKIp11xqJH5Owm4BPnmVdoXxJGHoe7o6Mk+fq+gnnZ+AjbLYbqrrCuRo7lyOH/ZIQJrZnZ9zf33J1fcGpPXF3f8uPfvTH/PCHP2TRRG5u33HqHlhtGwpX84PPPsP7THvsubi4pFk07Pc7co5cX12yXFSUpeXdzTtIgYe7W96+fc1yucTVJe/evSVbx3g8sN8/UFaJ8+0579684x9E+/iV2ASUhmfPr1B4lk2F0Ug0V1HiSsux2zFMgzRzfKDvRrbbJcY6QogoY1hv1yw3S
2xpSSS8n1A6zjprMFlBzHNu3gwnTZFu7Dkej0zTxPb8CU3TzKOyeQyWInUpja4YPWTZZMa2oyxLKu2wxmK0ZMpnFNFHjFZMc5dbZvoiXokxCmAkJ0Fnm4pxHOj7lvPzS4zOvH1zy9XFObu+p2oaEnBqW0IUeawxTkoVW0jqstKc+o7t2Rk3dw8cjkfKqmK9XrM/7emHgb4/sSWKeShDThK3lWIk+YhxIiMOs624aRaMw0AkYYwlA13XMYyjXKGVEh3CDBERWTGC8UoRRRYN/EwwftT7O2vQhUWRmfycK6A1XXsEMsvlksIZCifj3JREQ++cY6KX+b2z7I9HUlRoJ98/HxKhzBRWUVixDOcsakM9h40666XEEQ0T+jEsPQk0lRSJYcJPBm012iqs0WgqsAsgs97UnG3PsUXJqeuEBBwDWY1oK4zD58+fc3PzjpQSL1++5LPvfywKwv0OYzSH/R5nS66vrrClWOnrusJow3q14sMXH1KWIldu2wMoWK42lGWBc46hb3m4u2G3OzKNkfUHZ9T1gpfffIspa24fduxv95Tnms++/wO+/fYlV9dX37n+fiU2AWs110/OUExUznE6Hri7u+HFiw9QRrFYLsidMPpXqzW7+4MYV+A9kx2VcYWlbkrK2QGXckDn9J4YnPzE1Hf4aaIwkkq82+2IIbDdbFg0jZh2pgmtDT5GnC0oC0sYBgIaUyimsac7nXCmoKgalHVSZ+c8NyEDWsPkByEYhwIfJoa2xViLstIgs3aO/5pPqxwDrqwQTYMlSrAewzDfGlyJdRXKylhLGUs3egn+8F7CUJH6MCtwgyEGT9eeaLqWnCNaC8sAJZTZPJ/GrixlWJglQDRFcRo6JzbnYRxnDbvjMW4shMQ0TYTgKayMBDVz4u/ctMxRNoTHDActmR8YlVEkoh/R1hGmieAkVr1wBmc0OXq0UtiywFn7nrBsZ5bhEMIjKZT9fk/rMk1TsWwqSudIM5fBipMAvAc9YAqwRmNyQMWJ5BWehNaJGEvIUiaUTYHKkZzAunlTzJYpZOHSqsz+tENp8NFQNoqQBs6WF+R8ScqBuq64v3+g7zu6ruOv//W/xmYjaLGLi0sCI4fjHUrBOPWUZYV1hn440bYtzaKSJO3oORxOrJYbqmqFNnuaZs3zD16wXK+oqxXj+I5FKaEwddPgqpKLqyvWZ1uePn363evvL3Nx/1kfKScedjecuj1tzqiUGOc56RQmXFlQk3FFwdX1M8gFKY/0bcswTNT1mYy45pgoly1WS569JP9IQGf04/vEn5hF3z10HWVVYZ1jt9uhkVjqfupJObPaFox9T+gnbLWgVIb2eCDHTN1UuFKulzEnUvTiSkN+PXRHuSLH5QyMbKmqata1S8hG3w/kGZLR9/JDXyzXTLOhJyaxy66ahqpZIJFf4ov3UW5Mgjt2+ClIPLsSx93tbUeMEzF62vbA2J+whcHqAjuLflJK+MlTL/S88UhUvPfiGbTWCv03xhnh/Qucd55luKLgU+9NL1ornNHytaeAIpFjRKuMm91/2Si0pIiiMZLGNPYSMpoTVitSkOSfx5uWMRIVZp1lsWhIaSChaJqau/sDx35iDAJUXS7MLI2WVChjNSnKaBWlcM5CDKQwEWbscC40JEMKkg41tCPdeEQlw9XZE1whKU7aluyPR4rSsjvu2GyX9N2RspGg0dDXOOc4tQd++MMf8Ed//PvEGFkuJXD3o48/gqTYH/aUtQShvrt5Q1k2nJ9fiIM1B3b7Owrn5kAWw3q1xpqCaZwoy4aLS8tmvWSxXFAtGs7OLqiXC1Cavqg4Oz+j9wPrzVq0HN/x+NXYBFLk7v4d3anFjxOXFxesN2tevnzJ6CfpqgqCVJpKypGCpKq0XYs1De/evaGuNvNV9YJKFTgtPjfSjP8KkzDvYxAfeggU1mFQtIcj+9PIomlQCvqupW4WGAXt8YCyhVy/+5axPbFwDmcdYfJgBMKFMThnUFn03X7qKZwl5YD38vmnUZRyVVVROEd3OhGCRysY+x4fAs1iQXs8iFV4VpjF2RI8jR7tKsp6QUCRMihbkJTBx0nm9sFTVyWH/R5rFYumIoaRrt1T1AVOZ2xh55JI1HwpJjGrZHHaRaKEhyAde5jVf3MuXpix7I8BGSnNEUZZBEUYLeWClwWlcqQwhuQM2ovn36j8/kpODoQp058OMla1wiLwSahLWgkrUBSgktE3jIEpZApbYLRiGCKhn8COaFfitCJGSThSBnROxOCxxqBiIE4DBsAmsgGVC2FM+IG+y4zTkZAGFs2WZrWhcJb9/uF93HpVVxSFpSgL0bgcdnRdy49+9neIKXJ/f8s/+1/6L7DZbOYQ0oZ3795S1zUff/wJfd9hi5rt2Zbj6YRWGYh0/ZGcoSznuD0yxjQslmuiVyKd355htKDRj6cjU/IUhWW5WFDVlUBtQ+TQ7XGV5euXX3/n+vtTN4HvCB75nwL/eWACfgb88znn3Ywl/2Pgx/Nf/1s553/hT90EYmLyE8vlklD49yz7b755JXbawEyU6TntforGsDmriTHip4nDcc9XX31B1wVSyjTLkmqxFTpwnEhTT5p6gh/FgjmOWJ1FGlwUeO9p2x6t5Kqbk1hmnTVyTc1Qu4I09bSnI3GcsLUlToGH/R7jCqrFEqfAD73EcQX5HFZDnEainzBaWPBd2+MuL5mGgXHo3p+qMUTRFlgndB8lDjdbalmoWTH6SL1y1IsVIWb6qaUqK/wkEdYhRhkvrRYYa4lxYrmqCXkO/0gBUhBst5ZGZ9e2rH0Qnl7W+JBmX4QhhEgIEbLALKdpks8z9wNExl0xDe17pp+zFp0kSyAGL6VFThTWMCHjWqOYY93yvFFLxt/Qd1QzHsw4y5iSlE1JqFLWKLw1c0KPJSShL1unUNYxBNC9p6wjdeEIQShKMWXUvJl5MpNWuCIK7ThHklGQA9aUOKcprKEoGrQp2WwvWSwrUIp0SHRDx+XVGf14YrFczXZng/eRxWLFelPw+tUr3rx5xRdf/IzVaoH3I5vtmmEYGIZeDoHCcXd3y8X1louLC5bLFUpn3r57DRmapqYoFkxetAOH/UgMam46LiVkVEUedncYp2nqEmvgcDxQNzVTnLClIirPw+H2z78JIMEj/yvgf/9LH/s3gb+Rcw5Kqf8J8DeQzAGAn+Wc/9E/w+u+f/jgGfqRp1fPiFPg7eu3QglKmrpZoLKcWn5KfPP2FXVRUzdPgExZFVSVQ+nMfn/Py5dfcXV9zmJZoHUGLzkBaeqJs8pumkZcXc45dcLTV9qwqBvKQrTopXP0fcfYdzTLFVZnuuOeh8OJqm5QtWHsOw77HVWzpF4sIGe69iTACxKlE3ru1Hfk6HHGMA0D09BhFcQwzXmCkRhlAflxFNZHznN0uPQHlBI9QWZEKYuxmbYfOI5HjHG0XcdysSCmzOjH2eFnaYcTKRaUVSVBLDlCCqSA9EmiZxxk8ymdhGKG4LH2ETv0i4fIIBRm9r6P40RZiloz+lFwZ1rGfvqx+ThPG9QMQU3By41svooLwUkUjBnpt0Ci
7nHJ+eEC9KZGES+i4SkyqPkVLnuIbtYRs20XKJUQk827tk9EkEK/UMtHyRFuaUUmJiUFYKaZiYq/TC93wsaUKlcByP3d0d3nrrLZ4cHfPhnXuAYDIZo8qc/lqX63u7LBcjFouSdrvJfLGk0agT1HyUquivddne2cR1ba0zsJoYVLat9QUMoWcdSj3e7NdCFjO9ezDLCqoyI88T4iQmihacnw+IoxPG8wX7L7/KjZs32K5gsUwRoFu7jpasdxwXISRJkupBJbFaWpprYtFisVjVB3QRMcsykixBmQrfCyiLEst6zp2AlIKbL7/EztYN4lmN0dzh2WBKWUiajR5rvYA4TkiSEXmeUSiohQ1qYQPT8pnOI8ajCaPpENsTZLlLkibEWYrKFc1mHVUWHDw9oN/dZjQacfzshMC3mC9KojghCOscPzvjSRoReJIP3r/N9uYua2t94mVJUKvT6TXZ3d1ld3cbQamXZ5QZ9XpIqRKEUDiurtYapoEsKzqdLo3NTcwyR4lS75a3BFm+xDAFg8ExqioZjkas9et6MaWQ9Hpd/q9H32AWetiOgeOD61k8evSQyWyJaXl88P7HDI4VYVjw1pv7BFtN1tc3qO12ePjgLgeP7tGoN+n3r2GKGrWazWIRo1RJHEe6ILtIAEjSFEOaTKdzDGnx0ssvo5RgbeMaZ6cnmKbk9Tde4Rd+4edxHcEPfvBNXM8jL2PqYcibv/ZzHB6d0Gy1cRybTqdLRcTZ2RF7O7s0r/do1HpkUcV0PuPg8JC97Q1MqUd6lVJYSHIikiLGrYPd6CKU3iy80uBmVelCKU38MQyBNCxUrnRrWOrtSLZtI5RiMpogsxLb9vjc229zPBhyeHKmNzqVCWGg9f981yNaCDzXpd/v47qzlTyZi22b7O5usbe7jetaCFmtmHk6ArEdC8PQY+x6VNnEC13iqMJxPdI0pkJQKEWptLMyTJOiylkuI8ajEUEtpNbQRKIvfemLnI+nDKdzlLBQWYHp2IiypCpyLYqyIhBdsAtBa0fEcaznC4RaqQppNuF8Pv/U8/dcOAHXc7l9+yM+/v5dOm3FPHnGIlGUQmDYku5Gn4MHD9nr7/L48RMG0yFKuNQWMbXQwzBsjg6POTh6hONLvECiZMlmp49n2biWQVIW9Htdtq9dQwoXz/Gp1T2Onj0gzxer1k2bg9MnvLS/w1tvvkO0iDl4esTO9j6WbTEcji4VW46On3J6dsRwdMbOjW2CepNoNmMyGRMvE+RognR9rXlXCJazhDRfUMmSMKzRW1snywUffnSPRrPB5uYGYUMX7YQwuf3RbVzXwnUlouUjzQlpHq00Ek/4zd/6S3zj20NaL3WYzZ+SxBknJ2eEjonXbzI4P+Po8AC5LXCdIfWalqNOU4llBRRFznKZ0Wq1KMqSRqOFYVrcunUL07Z56803+O733ieKFjx8qNWMoVrtBKz43Dufw5A53//etygTA8OEv/SVX+L4ZMB4NsZ2HM6H51i2Q1XpzTkoST2sUyYlURyRFzm1uk9VZaRpRpzEpMuURQZtw6V9wSdYpbMCdNJbVXpQZ8WoQ2gCD5VadZAAFPEyYjqdUVbg+CGbO9f5+Z9/l1sf3+G973yHwLeo+Q4nxyeEgUWz2WQ0PGU8GlEp6HW0+KvMFb5nE8czjCnUVEPrYhYlCnk5fAQS07CpciiLijQvkJaNKAsMUWJUNpblEDZabCiLsFEgTRvTtJjN5mSFwjJN3nnnHc7HM/7ln3yLrFQIw2SxXFIPfMKgjpSalZgkCWVZ4vu6QxCtBEcNw8BxbQzToMhyTCGZR8mnnr/nwgnkec7jg6d4qkZVWmRlwpvv/CLfv3VIXmYso5TReIRvnJJXJa4fUlSCo2enCDmm3e7SaXV5/Pghw7Mh3bWQs7MxZix449UbzMdjXMdl69omDx7cZ2P9Os1mh9H4nMdPj/ACSVYZ2IaPY4c4dkCrEZAnp6yv17m+d4OTszNG8wGng2NG4zO6bpu8KghDjyhZ4NVMTk6PmQxGCGXg1RrgeCyzgnKhIC/YuNZi41qbNI9YRGOqyiKOp0hT0V3r0Om0GY1j1ta7/Mtv/DMc12I6H/Lo4RO++ptf5L3vfoNXXv0S3e4e6/1dWvUNLLNJPYQiHyOUII1SDp4+xTINGo0aYd2n3a6zt3udmie4e+d7RHFCvdEgSROy84zB+YhaLcRxHYq8Yv/6DsvlgulszHg2YbmY8dqbb5LlGePxgKOjR2xdWyNKYurNFuOzObP5mMVixmgyJIpjlMyxLYf9/R0mowmT8ZTjwymO4XNzd59Ws6s1DJSDYzskaUqcJMS5oEDrFVIopCtR8kKyXNcDyjLT5BxDt+mqFYNOKEWWpAjToCrz1eBMTqUEURRhmAY3b+zzy7/8ZZ48fcrZ6RGSCqoU21J84d23MeUajx8/Ik5TamGDJMsJbZdkMmZwfszG5hZb27sEQYhleWBaCHSofUFtLjOYTmckWablxYWhmZK2gyNKLeTiNgijAoXEcl2E1GPNSV5Sr9f54he/SJIVfPeDW4wnc0zLYrlcUBUZtVqwGjPmso2oBUYUYRhgmiaWbRGnMVmqW4Y1P/jU8/dcOIGyKqmFAQ2zQ57NyAuDRriOZMj33v8Btz4c4JkKMp8bL71OWuU8eXSEZXiUZcTR0Rmua9Fudag3Ntm81ub+g49ZnA/4zjcH/NKXf4lOt814PKPf6+L7HmG9zny5oL9+jf5mC5AcP8l5/bXPUWZzvvvdD9jd2qXb7vMnf/JNFtGCV157CcMqsWzB6OCUKE9orXdwQ1+LmgQezWCX6XiOND2Ozofcf3rIq3ufoyxgMdcTXYt4xCKaUSmTRsMnK0tu377N/s2bHB6M6HQ1XfXatU3u3R6xvdfny1/5Eg+efoQQ8NqrbzEeTqh5bUwzoLVVJ0vO8cwaNi0m50tUVXFj/zr7N2/QbGzjuxaL+YDz4Sknp2e8+ebbdHtdDo+OtW5+WOf05FS31IIap2cntFpNnp1o4tbGepc4i/kH//Dv47om/X6T8/NzOr0+k/MlSbLkm9/6BkpY7L/0Eq12iDRKhsNTvvHH38ISLq1wjWQxJE9yVJUh6SJUjm1ClhcUZYVhuHi1JmFYX6n0SM0fqC72FGhJblWVl3r7oFWITNOilDlqJbiRZxmO7VAhqITFdDKl1u7yV37lVzg9O+V/+71/DCq/5N/fvv0xtcCBFVXX932iZYTMC8bzqRaBcSxqoY8QCt9XlJmJYTl4nrWaQ1AgDWbzBa7XQAmJYVqoosCxfGzXxJI2QmaUKtNphGmS5gWGKWgGIR3bZef6TfZvvkznf/8n/Is/+CMWywjTMrFtkzAMLxmGSZJcjhyD5kXo2QWBJQ0qIcnSlLDW+NTz91w4Adu2aHc6tN0Nau46jx9/zO/+g3+E5a+TJhVlGrOsIgbHC3obO1SGwfl4wt52F9ep8Ud/+IfE0ZLPvf0K77z1OqZVUvNshkcHbKx12d/f4/bHd0hziOIxTw8HeN4hpVJkZcrtu/fxnBq98GUCv0FU5Wysb9HpdFkuYh7cf
4QwBOubPSazMf31NqXKefbsKZNkwrXrW6RZAqXi6PAp7VaPbrdDa2OD/vYOTtXFlVrPbjR9SEFCvowJak1uvPQyy7jg1ke3UYbJcqaoqlQTlzbXuXv7mLDR4c6DD/nCl97l/ASeHZ1QiZx3f/6XOXp2ijQmlJnFgwcPMas6oaejCiXnnJ+fMJvkUNlYRkq306DVbtJfX6PeaHN0fApKMp9p6fFWs8VsOqXVdRAGpHmMYTncv3+Ho+ND3n3353Aci2fPjrj98S0MQ+IZPp0gZzQ6p93bYLFYMJmOSLIF49EZICmKiryoGE+mJFGEIMcQBYu5hWkIfN+nEiaW5+C5Hp5fQ9jOZf6vywGrhaGqolIlotL9d0PqcXBWKj+L2Zz5dIqQEt8NUEKSVYInT5+wbdr0N67x1a9+lWeHT7n94fephzWm4ynPDh/z0s09ajWfslJMJhOmsyXtmkfNFjTqPaSsOD97RhxFeH6dJK+ohW22dms4lkuaZiRZzmwRsd7XXSU9LaWdQVmWKCGRhonj6nqGEJoqbbkelu0yi2JmiwVra2v89m//Nr/xG7/JnXv3+frX/wW3P/qQKIoIw5Bms8lsNrtMAS7oxEmS4Nj2qisgsaTx5xca/YtGnusuwNP5kLff2Off/q1/l+98/yH/5Pf/GCFMpLSZL87Z6PTob1wjqxSvv27TrHfJk4JWs0OVl2yub6FK+MFHt6hUwrVWSM3z+OC732MynxInFW7QIV5m3Lt3gO3a9LfWKKsKy/V5773v8dGHt/jaX/81siTm++/fYntrly984QuUqiRKZ4zHYxotn+VyhqIkiubcufsxzWaT6WgMOTSbLfI8Z5kkzGYzQqvJG2+9xv2H75FmOeubbT68fYvJdEZ/fQfPD7l+cx/XrfHg41tsrr/CbF4ym03JihRpwXvvfZO/+ut/BUqLOM4wTIVj1PC9BXE8oR422FgHWQRQmngeLOIp9+7dwTI6tBprBG7FbDYhrLe4f/8eYaPN9s4Os9mSj29/jOM4tJpNTfE1DWQBrVaTg6NDkizlF7/0CxRVQavTQt6q6PTafP0Pvs7GVpvRaECSxfT7fRqtNoXKaTR3ePLYJs9KRoM5+/s3qXvnDE6OkYahF6cuKyxL0Gg0Nee/NHHCRHcBkVx2t8WP1AVXwzN6QAbDgiLXQjB5znQ6YT6aUK+FWoHH9ZjGGc8Oj/DDJn6tzs61Lf7yV77C0dNHHB48pVl36ffW8Fb8fNO0saXF/v7LiDwmW4zp97tAyeHTx0jDJmx0iNOK3vo2/Wt7Wu0oy7XzmM5wXJc4yhDodegUijTNkKIEtVI6LirmiwWGaePXm/j1Jo4fYtkzJvOIWr3B22+/xTvvvMOv/Bt/mVu3fsDBwQEfffQRJycnlzMEFynBRb0g8H3KIkciaNQbmo79KRCfFDe8KgghBsASOL9qW/4c6PLZth8++5/hs24//MV+hl2lVO/HX3wunACAEOLbSql3r9qO/6/4rNsPn/3P8Fm3H67mM3x6ovACL/ACPxN44QRe4AV+xvE8OYH//qoN+HPis24/fPY/w2fdfriCz/Dc1ARe4AVe4GrwPEUCL/ACL3AFuHInIIT4N4UQd4QQ94UQv3PV9vy0EEI8FkL8QAjxvhDi26vX2kKIfyaEuLf62bpqOz8JIcTfFUKcCSFufeK1n2iz0PhvVtflAyHE56/O8ktbf5L9f0cIcbS6Du8LIX7jE+/95yv77wghvno1Vv8QQohtIcTXhRAfCSE+FEL8J6vXr/YaXCw2uIoHYAAPgH3ABr4PvH6VNv0ZbH8MdH/stf8S+J3V898B/ourtvPH7PsK8Hng1p9mM3qf5D9Fc3S+BHzzObX/7wD/2U/429dX3ycHuL76nhlXbP8G8PnV8xC4u7LzSq/BVUcCXwDuK6UeKqUy4HeBr12xTX8efA34e6vnfw/4G1dnyr8KpdQfAaMfe/nTbP4a8D8pjT8BmqsV9FeGT7H/0/A14HeVUqlS6hF6Qe4X/sKM+ymglDpWSn139XwO3AauccXX4KqdwDXg4BO/H65e+yxAAb8vhPiOEOI/XL3WVz9cw34C9K/GtD8TPs3mz9K1+Y9X4fLf/UQK9lzbL4TYA34O+CZXfA2u2gl8lvFlpdTngb8G/EdCiK988k2l47nPVOvls2gz8N8BN4B3gGPgv7pSa34KCCFqwP8C/KdKqdkn37uKa3DVTuAI2P7E71ur1557KKWOVj/PgH+MDjVPL8K11c+zq7Pwp8an2fyZuDZKqVOlVKmUqoD/gR+G/M+l/UIIC+0A/r5S6h+tXr7Sa3DVTuA94CUhxHUhhA38TeD3rtimPxVCiEAIEV48B34duIW2/W+t/uxvAf/r1Vj4Z8Kn2fx7wL+/qlB/CZh+ImR9bvBjOfJvoa8DaPv/phDCEUJcB14CvvWv275PQui1S/8jcFsp9V9/4q2rvQZXWS39RAX0Lrp6+7ev2p6f0uZ9dOX5+8CHF3YDHeCfA/eA/xtoX7WtP2b3P0SHzDk6v/wPPs1mdEX6v11dlx8A7z6n9v/PK/s+WB2ajU/8/d9e2X8H+GvPgf1fRof6HwDvrx6/cdXX4AVj8AVe4GccV50OvMALvMAV44UTeIEX+BnHCyfwAi/wM44XTuAFXuBnHC+cwAu8wM84XjiBF3iBn3G8cAIv8AI/43jhBF7gBX7G8f8Ag1cGsYI8cBsAAAAASUVORK5CYII=", "text/plain": [ "

" ] @@ -314,7 +314,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The ONNX format is a framework-agnostic way of describing and saving the structure and state of deep learning models. We can convert Tensorflow 2 Keras models to ONNX using the keras2onnx tool provided by the ONNX project. (You can find the ONNX project here: https://onnx.ai or on GitHub here: https://github.com/onnx/onnx)" + "The ONNX format is a framework-agnostic way of describing and saving the structure and state of deep learning models. We can convert Tensorflow 2 Keras models to ONNX using the tf2onnx tool provided by the ONNX project. (You can find the ONNX project here: https://onnx.ai or on GitHub here: https://github.com/onnx/onnx)" ] }, { @@ -325,7 +325,7 @@ }, "outputs": [], "source": [ - "import onnx, keras2onnx" + "import onnx" ] }, { @@ -548,10 +548,7 @@ "source": [ "model.save('my_model')\n", "!python -m tf2onnx.convert --saved-model my_model --output temp.onnx\n", - "onnx_model = onnx.load_model('temp.onnx')\n", - "\n", - "# This can also be done with keras2onnx:\n", - "# onnx_model = keras2onnx.convert_keras(model, model.name)" + "onnx_model = onnx.load_model('temp.onnx')" ] }, { diff --git a/quickstart/IntroNotebooks/Additional Examples/helper.py b/quickstart/IntroNotebooks/Additional Examples/helper.py index 6cbe417d..66c4e006 100644 --- a/quickstart/IntroNotebooks/Additional Examples/helper.py +++ b/quickstart/IntroNotebooks/Additional Examples/helper.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/IntroNotebooks/helper.py b/quickstart/IntroNotebooks/helper.py index 6cbe417d..66c4e006 100644 --- a/quickstart/IntroNotebooks/helper.py +++ b/quickstart/IntroNotebooks/helper.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/IntroNotebooks/onnx_helper.py b/quickstart/IntroNotebooks/onnx_helper.py index ccd88f7a..6bea97dd 100644 --- a/quickstart/IntroNotebooks/onnx_helper.py +++ b/quickstart/IntroNotebooks/onnx_helper.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/Makefile b/quickstart/Makefile index cbdcced9..bf728ff4 100644 --- a/quickstart/Makefile +++ b/quickstart/Makefile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/Makefile.config b/quickstart/Makefile.config index 2c1234ba..d81f325d 100644 --- a/quickstart/Makefile.config +++ b/quickstart/Makefile.config @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/SemanticSegmentation/Makefile b/quickstart/SemanticSegmentation/Makefile index e119309b..5c1bdea3 100644 --- a/quickstart/SemanticSegmentation/Makefile +++ b/quickstart/SemanticSegmentation/Makefile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/SemanticSegmentation/export.py b/quickstart/SemanticSegmentation/export.py index f106094a..e5168aaa 100644 --- a/quickstart/SemanticSegmentation/export.py +++ b/quickstart/SemanticSegmentation/export.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/SemanticSegmentation/tutorial-runtime.cpp b/quickstart/SemanticSegmentation/tutorial-runtime.cpp index b6d31911..7f0854a3 100644 --- a/quickstart/SemanticSegmentation/tutorial-runtime.cpp +++ b/quickstart/SemanticSegmentation/tutorial-runtime.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/common/logger.cpp b/quickstart/common/logger.cpp index d795754c..2eaccd54 100644 --- a/quickstart/common/logger.cpp +++ b/quickstart/common/logger.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/common/logger.h b/quickstart/common/logger.h index c60c447e..513275c2 100644 --- a/quickstart/common/logger.h +++ b/quickstart/common/logger.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/common/logging.h b/quickstart/common/logging.h index fcd8efb5..f323d22b 100644 --- a/quickstart/common/logging.h +++ b/quickstart/common/logging.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/common/util.cpp b/quickstart/common/util.cpp index be301128..717b63aa 100644 --- a/quickstart/common/util.cpp +++ b/quickstart/common/util.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/common/util.h b/quickstart/common/util.h index 571645f9..50455e97 100644 --- a/quickstart/common/util.h +++ b/quickstart/common/util.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/quickstart/deploy_to_triton/config.pbtxt b/quickstart/deploy_to_triton/config.pbtxt index 4ffbdc88..63046c8d 100644 --- a/quickstart/deploy_to_triton/config.pbtxt +++ b/quickstart/deploy_to_triton/config.pbtxt @@ -1,30 +1,36 @@ -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - - -name: "resnet50" -platform: "tensorrt_plan" -max_batch_size : 0 -input [ - { - name: "input" - data_type: TYPE_FP32 - dims: [ 3, 224, 224 ] - reshape { shape: [ 1, 3, 224, 224 ] } - } -] -output [ - { - name: "output" - data_type: TYPE_FP32 - dims: [ 1, 1000 ,1, 1] - reshape { shape: [ 1, 1000 ] } - } -] +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +name: "resnet50" +platform: "tensorrt_plan" +max_batch_size : 0 +input [ + { + name: "input" + data_type: TYPE_FP32 + dims: [ 3, 224, 224 ] + reshape { shape: [ 1, 3, 224, 224 ] } + } +] +output [ + { + name: "output" + data_type: TYPE_FP32 + dims: [ 1, 1000 ,1, 1] + reshape { shape: [ 1, 1000 ] } + } +] diff --git a/quickstart/deploy_to_triton/export_resnet_to_onnx.py b/quickstart/deploy_to_triton/export_resnet_to_onnx.py index f8357a75..fba1550a 100644 --- a/quickstart/deploy_to_triton/export_resnet_to_onnx.py +++ b/quickstart/deploy_to_triton/export_resnet_to_onnx.py @@ -1,27 +1,34 @@ -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -import torch -import torchvision.models as models - -torch.hub._validate_not_a_forked_repo=lambda a,b,c: True - -# load model; We are going to use a pretrained resnet model -model = models.resnet50(pretrained=True).eval() -x = torch.randn(1, 3, 224, 224, requires_grad=True) - -# Export the model -torch.onnx.export(model, # model being run - x, # model input (or a tuple for multiple inputs) - "resnet50.onnx", # where to save the model (can be a file or file-like object) - export_params=True, # store the trained parameter weights inside the model file - input_names = ['input'], # the model's input names - output_names = ['output'], # the model's output names - ) +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import torch +import torchvision.models as models + +torch.hub._validate_not_a_forked_repo=lambda a,b,c: True + +# load model; We are going to use a pretrained resnet model +model = models.resnet50(pretrained=True).eval() +x = torch.randn(1, 3, 224, 224, requires_grad=True) + +# Export the model +torch.onnx.export(model, # model being run + x, # model input (or a tuple for multiple inputs) + "resnet50.onnx", # where to save the model (can be a file or file-like object) + export_params=True, # store the trained parameter weights inside the model file + input_names = ['input'], # the model's input names + output_names = ['output'], # the model's output names + ) diff --git a/quickstart/deploy_to_triton/triton_client.py b/quickstart/deploy_to_triton/triton_client.py index 8448efb3..a6e7553d 100644 --- a/quickstart/deploy_to_triton/triton_client.py +++ b/quickstart/deploy_to_triton/triton_client.py @@ -1,42 +1,49 @@ -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -import numpy as np -from torchvision import transforms -from PIL import Image -import tritonclient.http as httpclient -from tritonclient.utils import triton_to_np_dtype - -def rn50_preprocess(img_path="img1.jpg"): - img = Image.open(img_path) - preprocess = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ]) - return preprocess(img).numpy() - -transformed_img = rn50_preprocess() - -# Setup a connection with the Triton Inference Server. -triton_client = httpclient.InferenceServerClient(url="localhost:8000") - -# Specify the names of the input and output layer(s) of our model. -test_input = httpclient.InferInput("input", transformed_img.shape, datatype="FP32") -test_input.set_data_from_numpy(transformed_img, binary_data=True) - -test_output = httpclient.InferRequestedOutput("output", binary_data=True, class_count=1000) - -# Querying the server -results = triton_client.infer(model_name="resnet50", inputs=[test_input], outputs=[test_output]) -test_output_fin = results.as_numpy('output') - -print(test_output_fin[:5]) +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import numpy as np +from torchvision import transforms +from PIL import Image +import tritonclient.http as httpclient +from tritonclient.utils import triton_to_np_dtype + +def rn50_preprocess(img_path="img1.jpg"): + img = Image.open(img_path) + preprocess = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + return preprocess(img).numpy() + +transformed_img = rn50_preprocess() + +# Setup a connection with the Triton Inference Server. +triton_client = httpclient.InferenceServerClient(url="localhost:8000") + +# Specify the names of the input and output layer(s) of our model. 
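A readiness probe before issuing the request can catch the common case where the model is still loading; a minimal sketch, assuming the standard tritonclient.http client object created above:

    # Optional: confirm the server and the resnet50 model are ready before inferring.
    if not triton_client.is_server_ready() or not triton_client.is_model_ready("resnet50"):
        raise RuntimeError("Triton server or the resnet50 model is not ready")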
+test_input = httpclient.InferInput("input", transformed_img.shape, datatype="FP32") +test_input.set_data_from_numpy(transformed_img, binary_data=True) + +test_output = httpclient.InferRequestedOutput("output", binary_data=True, class_count=1000) + +# Querying the server +results = triton_client.infer(model_name="resnet50", inputs=[test_input], outputs=[test_output]) +test_output_fin = results.as_numpy('output') + +print(test_output_fin[:5]) diff --git a/requirements.txt b/requirements.txt index cb4cf74b..f87a9b0c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ -onnx==1.10.2; python_version<"3.10" -onnx==1.12.0; python_version=="3.10" +onnx tensorflow-gpu==2.9.1; (platform_machine=="x86_64" and sys.platform=="linux" and python_version>="3.7") onnxruntime==1.8.1; python_version<"3.10" onnxruntime==1.12.1; python_version=="3.10" diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index 73a0716a..ff543aaf 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,6 +25,7 @@ set(OPENSOURCE_SAMPLES_LIST sampleOnnxMNIST sampleIOFormats sampleOnnxMnistCoordConvAC + sampleNamedDimensions trtexec) foreach(SAMPLE_ITER ${OPENSOURCE_SAMPLES_LIST}) diff --git a/samples/CMakeSamplesTemplate.txt b/samples/CMakeSamplesTemplate.txt index 47d2c240..b355fa1d 100644 --- a/samples/CMakeSamplesTemplate.txt +++ b/samples/CMakeSamplesTemplate.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/BatchStream.h b/samples/common/BatchStream.h index 4990aade..c08c9e14 100644 --- a/samples/common/BatchStream.h +++ b/samples/common/BatchStream.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
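Before deploying the exported resnet50.onnx to Triton, it can be sanity-checked with onnx and onnxruntime (both listed in requirements.txt above). A minimal sketch; the input and output names match those passed to torch.onnx.export in export_resnet_to_onnx.py:

    import numpy as np
    import onnx
    import onnxruntime as ort

    # Structural validation of the exported graph.
    onnx.checker.check_model(onnx.load("resnet50.onnx"))

    # Smoke-test inference on random data using the exported I/O names.
    session = ort.InferenceSession("resnet50.onnx", providers=["CPUExecutionProvider"])
    (scores,) = session.run(["output"], {"input": np.random.rand(1, 3, 224, 224).astype(np.float32)})
    print(scores.shape)  # expected: (1, 1000)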
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -120,7 +120,7 @@ class MNISTBatchStream : public IBatchStream file.read(reinterpret_cast(rawData.data()), numElements * sizeof(uint8_t)); mData.resize(numElements); std::transform( - rawData.begin(), rawData.end(), mData.begin(), [](uint8_t val) { return static_cast(val) / 255.f; }); + rawData.begin(), rawData.end(), mData.begin(), [](uint8_t val) { return static_cast(val) / 255.F; }); } void readLabelsFile(const std::string& labelsFilePath) @@ -177,7 +177,6 @@ class BatchStream : public IBatchStream mLabels.resize(mBatchSize, 0); mFileBatch.resize(mDims.d[0] * mImageSize, 0); mFileLabels.resize(mDims.d[0], 0); - reset(0); } BatchStream(int batchSize, int maxBatches, std::string const& prefix, std::vector const& directories) @@ -198,7 +197,6 @@ class BatchStream : public IBatchStream mLabels.resize(mBatchSize, 0); mFileBatch.resize(mDims.d[0] * mImageSize, 0); mFileLabels.resize(mDims.d[0], 0); - reset(0); } // Resets data members diff --git a/samples/common/EntropyCalibrator.h b/samples/common/EntropyCalibrator.h index 366b9beb..936d10e0 100644 --- a/samples/common/EntropyCalibrator.h +++ b/samples/common/EntropyCalibrator.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/ErrorRecorder.h b/samples/common/ErrorRecorder.h index f6b4c3ee..3cc8ef9e 100644 --- a/samples/common/ErrorRecorder.h +++ b/samples/common/ErrorRecorder.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/argsParser.h b/samples/common/argsParser.h index 52f42ab0..3b80797c 100644 --- a/samples/common/argsParser.h +++ b/samples/common/argsParser.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/buffers.h b/samples/common/buffers.h index 271dab52..6d87a11a 100644 --- a/samples/common/buffers.h +++ b/samples/common/buffers.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/common.h b/samples/common/common.h index 9437217b..b2a01c5e 100644 --- a/samples/common/common.h +++ b/samples/common/common.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -271,8 +271,8 @@ inline void readPGMFile(const std::string& fileName, uint8_t* buffer, int inH, i { std::ifstream infile(fileName, std::ifstream::binary); assert(infile.is_open() && "Attempting to read from a file that is not open."); - std::string magic, h, w, max; - infile >> magic >> h >> w >> max; + std::string magic, w, h, max; + infile >> magic >> w >> h >> max; infile.seekg(1, infile.cur); infile.read(reinterpret_cast(buffer), inH * inW); } @@ -352,7 +352,7 @@ inline void* safeCudaMalloc(size_t memSize) if (deviceMem == nullptr) { std::cerr << "Out of memory" << std::endl; - exit(1); + exit(EXIT_FAILURE); } return deviceMem; } @@ -535,7 +535,7 @@ inline int32_t calculateSoftmax(float* const prob, int32_t const numDigits) // // The default parameter values choosen arbitrarily. Range values should be choosen such that // we avoid underflow or overflow. Also range value should be non zero to avoid uniform zero scale tensor. -inline void setAllDynamicRanges(nvinfer1::INetworkDefinition* network, float inRange = 2.0f, float outRange = 4.0f) +inline void setAllDynamicRanges(nvinfer1::INetworkDefinition* network, float inRange = 2.0F, float outRange = 4.0F) { // Ensure that all layer inputs have a scale. for (int i = 0; i < network->getNbLayers(); i++) @@ -709,7 +709,7 @@ void writePPMFileWithBBox(const std::string& filename, PPM& ppm, const << ppm.w << " " << ppm.h << "\n" << ppm.max << "\n"; - auto round = [](float x) -> int { return int(std::floor(x + 0.5f)); }; + auto round = [](float x) -> int { return int(std::floor(x + 0.5F)); }; const int x1 = std::min(std::max(0, round(int(bbox.x1))), W - 1); const int x2 = std::min(std::max(0, round(int(bbox.x2))), W - 1); const int y1 = std::min(std::max(0, round(int(bbox.y1))), H - 1); @@ -750,7 +750,7 @@ inline void writePPMFileWithBBox(const std::string& filename, vPPM ppm, std::vec << "\n" << ppm.w << " " << ppm.h << "\n" << ppm.max << "\n"; - auto round = [](float x) -> int { return int(std::floor(x + 0.5f)); }; + auto round = [](float x) -> int { return int(std::floor(x + 0.5F)); }; for (auto bbox : dets) { @@ -789,7 +789,7 @@ class TimerBase virtual void stop() {} float microseconds() const noexcept { - return mMs * 1000.f; + return mMs * 1000.F; } float milliseconds() const noexcept { @@ -797,15 +797,15 @@ class TimerBase } float seconds() const noexcept { - return mMs / 1000.f; + return mMs / 1000.F; } void reset() noexcept { - mMs = 0.f; + mMs = 0.F; } protected: - float mMs{0.0f}; + float mMs{0.0F}; }; class GpuTimer : public TimerBase @@ -829,7 +829,7 @@ class GpuTimer : public TimerBase void stop() override { CHECK(cudaEventRecord(mStop, mStream)); - float ms{0.0f}; + float ms{0.0F}; CHECK(cudaEventSynchronize(mStop)); CHECK(cudaEventElapsedTime(&ms, mStart, mStop)); mMs += ms; diff --git a/samples/common/dumpTFWts.py b/samples/common/dumpTFWts.py index 4afa36f5..0b7a0123 100644 --- a/samples/common/dumpTFWts.py +++ b/samples/common/dumpTFWts.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/getOptions.cpp b/samples/common/getOptions.cpp index f55caa51..8bcf7958 100644 --- a/samples/common/getOptions.cpp +++ b/samples/common/getOptions.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/getOptions.h b/samples/common/getOptions.h index e4d4276a..e8460513 100644 --- a/samples/common/getOptions.h +++ b/samples/common/getOptions.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/getoptWin.h b/samples/common/getoptWin.h index 5f7f9fb0..7e1cf1ba 100644 --- a/samples/common/getoptWin.h +++ b/samples/common/getoptWin.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/half.h b/samples/common/half.h index 23b282e8..c5ebdb1a 100644 --- a/samples/common/half.h +++ b/samples/common/half.h @@ -16,7 +16,7 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -1523,14 +1523,14 @@ class half /// \return incremented half value half& operator++() { - return *this += 1.0f; + return *this += 1.0F; } /// Prefix decrement. /// \return decremented half value half& operator--() { - return *this -= 1.0f; + return *this -= 1.0F; } /// Postfix increment. diff --git a/samples/common/logger.cpp b/samples/common/logger.cpp index 1f303cb0..0592db2c 100644 --- a/samples/common/logger.cpp +++ b/samples/common/logger.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/logger.h b/samples/common/logger.h index 94a95b7b..ff59bfa9 100644 --- a/samples/common/logger.h +++ b/samples/common/logger.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/logging.h b/samples/common/logging.h index 17aa58e6..38cbbd01 100644 --- a/samples/common/logging.h +++ b/samples/common/logging.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/parserOnnxConfig.h b/samples/common/parserOnnxConfig.h index 20ca20ac..b1c4e434 100644 --- a/samples/common/parserOnnxConfig.h +++ b/samples/common/parserOnnxConfig.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/safeCommon.h b/samples/common/safeCommon.h index 9cf8e4ec..326257ab 100644 --- a/samples/common/safeCommon.h +++ b/samples/common/safeCommon.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -154,11 +154,7 @@ class TrtCudaGraphSafe void endCapture(cudaStream_t& stream) { CHECK(cudaStreamEndCapture(stream, &mGraph)); -#if CUDART_VERSION >= 12000 - CHECK(cudaGraphInstantiate(&mGraphExec, mGraph, 0)); -#else CHECK(cudaGraphInstantiate(&mGraphExec, mGraph, nullptr, nullptr, 0)); -#endif CHECK(cudaGraphDestroy(mGraph)); } diff --git a/samples/common/sampleConfig.h b/samples/common/sampleConfig.h index bb2eb30b..6402b448 100644 --- a/samples/common/sampleConfig.h +++ b/samples/common/sampleConfig.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,9 +56,9 @@ class SampleConfig : public nvonnxparser::IOnnxConfig bool mDebugBuilder{false}; InputDataFormat mInputDataFormat{InputDataFormat::kASCII}; uint64_t mTopK{0}; - float mFailurePercentage{-1.0f}; - float mTolerance{0.0f}; - float mAbsTolerance{1e-5f}; + float mFailurePercentage{-1.0F}; + float mTolerance{0.0F}; + float mAbsTolerance{1e-5F}; public: SampleConfig() @@ -313,7 +313,7 @@ class SampleConfig : public nvonnxparser::IOnnxConfig { return mTimingCacheFilename.c_str(); } - + void setTimingCacheFileName(const char* timingCacheFilename) noexcept { mTimingCacheFilename = std::string(timingCacheFilename); diff --git a/samples/common/sampleDevice.h b/samples/common/sampleDevice.h index f73889b8..83cd53c3 100644 --- a/samples/common/sampleDevice.h +++ b/samples/common/sampleDevice.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/sampleEngines.cpp b/samples/common/sampleEngines.cpp index b9b838d9..8a442083 100644 --- a/samples/common/sampleEngines.cpp +++ b/samples/common/sampleEngines.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -236,8 +236,8 @@ void setTensorScalesFromCalibration(nvinfer1::INetworkDefinition& network, std:: //! //! \see Parser::operator bool() //! -Parser modelToNetwork(const ModelOptions& model, nvinfer1::INetworkDefinition& network, std::ostream& err, - std::vector* vcPluginLibrariesUsed) +Parser modelToNetwork(ModelOptions const& model, BuildOptions const& build, nvinfer1::INetworkDefinition& network, + std::ostream& err, std::vector* vcPluginLibrariesUsed) { sample::gLogInfo << "Start parsing network model." << std::endl; auto const tBegin = std::chrono::high_resolution_clock::now(); @@ -310,6 +310,15 @@ Parser modelToNetwork(const ModelOptions& model, nvinfer1::INetworkDefinition& n { using namespace nvonnxparser; parser.onnxParser.reset(createONNXParser(network)); + ASSERT(parser.onnxParser != nullptr); + // For version or hardware compatible engines, we must use TensorRT's native InstanceNorm implementation for + // compatibility. + if (build.versionCompatible + || (build.hardwareCompatibilityLevel != nvinfer1::HardwareCompatibilityLevel::kNONE)) + { + auto parserflags = 1U << static_cast(OnnxParserFlag::kNATIVE_INSTANCENORM); + parser.onnxParser->setFlags(parserflags); + } if (!parser.onnxParser->parseFromFile( model.baseModel.model.c_str(), static_cast(sample::gLogger.getReportableSeverity()))) { @@ -648,7 +657,20 @@ void setMemoryPoolLimits(IBuilderConfig& config, BuildOptions const& build) } if (build.dlaSRAM >= 0) { - config.setMemoryPoolLimit(MemoryPoolType::kDLA_MANAGED_SRAM, roundToBytes(build.dlaSRAM)); + size_t const sizeInBytes = roundToBytes(build.dlaSRAM); + size_t sizeInPowerOf2{1}; + // Using 2^30 bytes as a loose upper bound to prevent the possibility of overflows and infinite loops. + while (sizeInPowerOf2 < 31 && (static_cast(1) << sizeInPowerOf2) <= sizeInBytes) + { + ++sizeInPowerOf2; + } + --sizeInPowerOf2; + if (sizeInPowerOf2 == 30) + { + sample::gLogWarning << "User-specified DLA managed SRAM size is too large and has been clipped to 2^30 bytes. " + << "Please make sure that this is the intended managed SRAM size." 
<< std::endl; + } + config.setMemoryPoolLimit(MemoryPoolType::kDLA_MANAGED_SRAM, static_cast(1) << sizeInPowerOf2); } if (build.dlaLocalDRAM >= 0) { @@ -886,7 +908,10 @@ bool setupNetworkAndConfig(BuildOptions const& build, SystemOptions const& sys, config.setFlag(BuilderFlag::kENABLE_TACTIC_HEURISTIC); } - config.setBuilderOptimizationLevel(build.builderOptimizationLevel); + if (build.builderOptimizationLevel != defaultBuilderOptimizationLevel) + { + config.setBuilderOptimizationLevel(build.builderOptimizationLevel); + } if (build.timingCacheMode == TimingCacheMode::kDISABLE) { @@ -1087,9 +1112,9 @@ bool setupNetworkAndConfig(BuildOptions const& build, SystemOptions const& sys, setLayerDeviceTypes(network, config, build.layerDeviceTypes); } - if (build.safe) + if (build.safe && sys.DLACore == -1) { - config.setEngineCapability(sys.DLACore != -1 ? EngineCapability::kDLA_STANDALONE : EngineCapability::kSAFETY); + config.setEngineCapability(EngineCapability::kSAFETY); } if (build.restricted) @@ -1104,8 +1129,11 @@ bool setupNetworkAndConfig(BuildOptions const& build, SystemOptions const& sys, config.setDefaultDeviceType(DeviceType::kDLA); config.setDLACore(sys.DLACore); config.setFlag(BuilderFlag::kPREFER_PRECISION_CONSTRAINTS); - - if (sys.fallback) + if (build.buildDLAStandalone) + { + config.setEngineCapability(EngineCapability::kDLA_STANDALONE); + } + if (build.allowGPUFallback) { config.setFlag(BuilderFlag::kGPU_FALLBACK); } @@ -1214,7 +1242,8 @@ bool modelToBuildEnv( std::vector vcPluginLibrariesUsed; SMP_RETVAL_IF_FALSE(env.network != nullptr, "Network creation failed", false, err); - env.parser = modelToNetwork(model, *env.network, err, build.versionCompatible ? &vcPluginLibrariesUsed : nullptr); + env.parser + = modelToNetwork(model, build, *env.network, err, build.versionCompatible ? &vcPluginLibrariesUsed : nullptr); SMP_RETVAL_IF_FALSE(env.parser.operator bool(), "Parsing model failed", false, err); if (build.versionCompatible && !sys.ignoreParsedPluginLibs && !vcPluginLibrariesUsed.empty()) diff --git a/samples/common/sampleEngines.h b/samples/common/sampleEngines.h index b53e1303..6c0a88b6 100644 --- a/samples/common/sampleEngines.h +++ b/samples/common/sampleEngines.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/sampleInference.cpp b/samples/common/sampleInference.cpp index e3fb116c..84850d93 100644 --- a/samples/common/sampleInference.cpp +++ b/samples/common/sampleInference.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -93,7 +93,7 @@ class FillBindingClosure void fillOneBinding(TensorInfo const& tensorInfo) { auto const name = tensorInfo.name; - auto const* bindingInOutStr = tensorInfo.isInput ? "input" : "output"; + auto const* bindingInOutStr = tensorInfo.isInput ? 
"Input" : "Output"; for (auto& binding : bindings) { auto const input = inputs.find(name); @@ -104,11 +104,23 @@ class FillBindingClosure } else { - sample::gLogInfo << "Using random values for " << bindingInOutStr << " " << name << std::endl; + if (tensorInfo.isInput) + { + sample::gLogInfo << "Using random values for input " << name << std::endl; + } binding->addBinding(tensorInfo); } - sample::gLogInfo << "Created " << bindingInOutStr << " binding for " << name << " with dimensions " - << tensorInfo.dims << std::endl; + if (tensorInfo.isDynamic) + { + sample::gLogInfo << bindingInOutStr << " binding for " << name + << " is dynamic and will be created during execution using OutputAllocator." + << std::endl; + } + else + { + sample::gLogInfo << bindingInOutStr << " binding for " << name << " with dimensions " << tensorInfo.dims + << " is created." << std::endl; + } } } @@ -505,14 +517,19 @@ class EnqueueImplicit : private Enqueue bool operator()(TrtCudaStream& stream) const { - if (mContext.enqueue(mBatch, mBuffers, stream.get(), nullptr)) + try { + bool const result = mContext.enqueue(mBatch, mBuffers, stream.get(), nullptr); // Collecting layer timing info from current profile index of execution context if (mContext.getProfiler() && !mContext.getEnqueueEmitsProfile() && !mContext.reportToProfiler()) { gLogWarning << "Failed to collect layer timing info from previous enqueue()" << std::endl; } - return true; + return result; + } + catch (const std::exception&) + { + return false; } return false; } @@ -539,14 +556,19 @@ class EnqueueExplicit : private Enqueue bool operator()(TrtCudaStream& stream) const { - if (mContext.enqueueV3(stream.get())) + try { + bool const result = mContext.enqueueV3(stream.get()); // Collecting layer timing info from current profile index of execution context if (mContext.getProfiler() && !mContext.getEnqueueEmitsProfile() && !mContext.reportToProfiler()) { gLogWarning << "Failed to collect layer timing info from previous enqueueV3()" << std::endl; } - return true; + return result; + } + catch (const std::exception&) + { + return false; } return false; } @@ -624,9 +646,13 @@ class EnqueueSafe bool operator()(TrtCudaStream& stream) const { - if (mContext.enqueueV3(stream.get())) + try { - return true; + return mContext.enqueueV3(stream.get()); + } + catch (const std::exception&) + { + return false; } return false; } @@ -854,7 +880,10 @@ class Iteration // Avoid capturing initialization calls by executing the enqueue function at least // once before starting CUDA graph capture. 
auto const ret = mEnqueue(stream); - assert(ret); + if (!ret) + { + throw std::runtime_error("Inference enqueue failed."); + } stream.synchronize(); mGraph.beginCapture(stream); @@ -922,6 +951,26 @@ bool inferenceLoop(std::vector>>& iStream float durationMs = 0; int32_t skip = 0; + if (maxDurationMs == -1.F) + { + sample::gLogWarning << "--duration=-1 is specified, inference will run in an endless loop until" + << " aborted with CTRL-C (SIGINT)" << std::endl; + while (true) + { + for (auto& s : iStreams) + { + if (!s->query(skipTransfers)) + { + return false; + } + } + for (auto& s : iStreams) + { + s->sync(cpuStart, gpuStart, trace, skipTransfers); + } + } + } + for (int32_t i = 0; i < iterations + skip || durationMs < maxDurationMs; ++i) { for (auto& s : iStreams) @@ -957,50 +1006,65 @@ bool inferenceLoop(std::vector>>& iStream template void inferenceExecution(InferenceOptions const& inference, InferenceEnvironment& iEnv, SyncStruct& sync, - int32_t const threadIdx, int32_t const streamsPerThread, int32_t device, std::vector& trace) + int32_t const threadIdx, int32_t const streamsPerThread, int32_t device, std::vector& trace) noexcept { - float warmupMs = inference.warmup; - float durationMs = inference.duration * 1000.F + warmupMs; + try + { + float warmupMs = inference.warmup; + float durationMs = -1.F; + if (inference.duration != -1.F) + { + durationMs = inference.duration * 1000.F + warmupMs; + } - cudaCheck(cudaSetDevice(device)); + cudaCheck(cudaSetDevice(device)); - std::vector>> iStreams; + std::vector>> iStreams; - for (int32_t s = 0; s < streamsPerThread; ++s) - { - int32_t const streamId{threadIdx * streamsPerThread + s}; - auto* iteration = new Iteration( - streamId, inference, *iEnv.template getContext(streamId), *iEnv.bindings[streamId]); - if (inference.skipTransfers) + for (int32_t s = 0; s < streamsPerThread; ++s) { - iteration->setInputData(true); + int32_t const streamId{threadIdx * streamsPerThread + s}; + auto* iteration = new Iteration( + streamId, inference, *iEnv.template getContext(streamId), *iEnv.bindings[streamId]); + if (inference.skipTransfers) + { + iteration->setInputData(true); + } + iStreams.emplace_back(iteration); } - iStreams.emplace_back(iteration); - } - for (auto& s : iStreams) - { - s->wait(sync.gpuStart); - } + for (auto& s : iStreams) + { + s->wait(sync.gpuStart); + } - std::vector localTrace; - if (!inferenceLoop(iStreams, sync.cpuStart, sync.gpuStart, inference.iterations, durationMs, warmupMs, localTrace, - inference.skipTransfers, inference.idle)) - { - iEnv.error = true; - } + std::vector localTrace; + if (!inferenceLoop(iStreams, sync.cpuStart, sync.gpuStart, inference.iterations, durationMs, warmupMs, localTrace, + inference.skipTransfers, inference.idle)) + { + sync.mutex.lock(); + iEnv.error = true; + sync.mutex.unlock(); + } - if (inference.skipTransfers) - { - for (auto& s : iStreams) + if (inference.skipTransfers) { - s->fetchOutputData(true); + for (auto& s : iStreams) + { + s->fetchOutputData(true); + } } - } - sync.mutex.lock(); - trace.insert(trace.end(), localTrace.begin(), localTrace.end()); - sync.mutex.unlock(); + sync.mutex.lock(); + trace.insert(trace.end(), localTrace.begin(), localTrace.end()); + sync.mutex.unlock(); + } + catch(...) 
+ { + sync.mutex.lock(); + iEnv.error = true; + sync.mutex.unlock(); + } } inline std::thread makeThread(InferenceOptions const& inference, InferenceEnvironment& iEnv, SyncStruct& sync, diff --git a/samples/common/sampleInference.h b/samples/common/sampleInference.h index e906552b..909a71b8 100644 --- a/samples/common/sampleInference.h +++ b/samples/common/sampleInference.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/sampleOptions.cpp b/samples/common/sampleOptions.cpp index 16913c3e..41a13972 100644 --- a/samples/common/sampleOptions.cpp +++ b/samples/common/sampleOptions.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -957,6 +957,8 @@ void BuildOptions::parse(Arguments& arguments) throw std::invalid_argument("Invalid usage, fp8 and int8 aren't allowed to be enabled together."); } getAndDelOption(arguments, "--safe", safe); + getAndDelOption(arguments, "--buildDLAStandalone", buildDLAStandalone); + getAndDelOption(arguments, "--allowGPUFallback", allowGPUFallback); getAndDelOption(arguments, "--consistency", consistency); getAndDelOption(arguments, "--restricted", restricted); if (getAndDelOption(arguments, "--buildOnly", skipInference)) @@ -1145,7 +1147,12 @@ void BuildOptions::parse(Arguments& arguments) { timingCacheMode = TimingCacheMode::kLOCAL; } - getAndDelOption(arguments, "--heuristic", heuristic); + if (getAndDelOption(arguments, "--heuristic", heuristic)) + { + sample::gLogWarning << "--heuristic flag has been deprecated, use --builderOptimizationLevel= flag instead " + "(N <= 2 enables heuristic)." + << std::endl; + } getAndDelOption(arguments, "--builderOptimizationLevel", builderOptimizationLevel); std::string hardwareCompatibleArgs; @@ -1245,7 +1252,6 @@ void SystemOptions::parse(Arguments& arguments) { getAndDelOption(arguments, "--device", device); getAndDelOption(arguments, "--useDLACore", DLACore); - getAndDelOption(arguments, "--allowGPUFallback", fallback); std::string pluginName; while (getAndDelOption(arguments, "--plugins", pluginName)) { @@ -1431,28 +1437,37 @@ void AllOptions::parse(Arguments& arguments) } if (build.safe && system.DLACore >= 0) { - auto checkSafeDLAFormats = [](std::vector const& fmt) { - return fmt.empty() ? false : std::all_of(fmt.begin(), fmt.end(), [](IOFormat const& pair) { + build.buildDLAStandalone = true; + } + if (build.buildDLAStandalone) + { + build.skipInference = true; + auto checkSafeDLAFormats = [](std::vector const& fmt, bool isInput) { + return fmt.empty() ? 
false : std::all_of(fmt.begin(), fmt.end(), [&](IOFormat const& pair) { bool supported{false}; bool const isDLA_LINEAR{ pair.second == 1U << static_cast(nvinfer1::TensorFormat::kDLA_LINEAR)}; - bool const isCHW4{pair.second == 1U << static_cast(nvinfer1::TensorFormat::kCHW4)}; + bool const isHWC4{pair.second == 1U << static_cast(nvinfer1::TensorFormat::kCHW4) + || pair.second == 1U << static_cast(nvinfer1::TensorFormat::kDLA_HWC4)}; bool const isCHW32{pair.second == 1U << static_cast(nvinfer1::TensorFormat::kCHW32)}; bool const isCHW16{pair.second == 1U << static_cast(nvinfer1::TensorFormat::kCHW16)}; - supported |= pair.first == nvinfer1::DataType::kINT8 && (isDLA_LINEAR || isCHW4 || isCHW32); - supported |= pair.first == nvinfer1::DataType::kHALF && (isDLA_LINEAR || isCHW4 || isCHW16); + supported |= pair.first == nvinfer1::DataType::kINT8 + && (isDLA_LINEAR || (isInput ? isHWC4 : false) || isCHW32); + supported |= pair.first == nvinfer1::DataType::kHALF + && (isDLA_LINEAR || (isInput ? isHWC4 : false) || isCHW16); return supported; }); }; - if (!checkSafeDLAFormats(build.inputFormats) || !checkSafeDLAFormats(build.outputFormats)) + if (!checkSafeDLAFormats(build.inputFormats, true) || !checkSafeDLAFormats(build.outputFormats, false)) { throw std::invalid_argument( - "I/O formats for safe DLA capability are restricted to fp16/int8:dla_linear, fp16:chw16 or " + "I/O formats for safe DLA capability are restricted to fp16/int8:dla_linear, fp16/int8:hwc4, " + "fp16:chw16 or " "int8:chw32"); } - if (system.fallback) + if (build.allowGPUFallback) { - throw std::invalid_argument("GPU fallback (--allowGPUFallback) not allowed for safe DLA capability"); + throw std::invalid_argument("GPU fallback (--allowGPUFallback) not allowed for DLA standalone mode"); } } } @@ -1777,7 +1792,6 @@ std::ostream& operator<<(std::ostream& os, const BuildOptions& options) { // clang-format off os << "=== Build Options ===" << std::endl << - "Max batch: "; printBatch(os, options.maxBatch) << std::endl << "Memory Pools: "; printMemoryPools(os, options) << std::endl << "minTiming: " << options.minTiming << std::endl << @@ -1788,12 +1802,14 @@ std::ostream& operator<<(std::ostream& os, const BuildOptions& options) "Calibration: " << (options.int8 && options.calibration.empty() ? 
"Dynamic" : options.calibration.c_str()) << std::endl << "Refit: " << boolToEnabled(options.refittable) << std::endl << "Version Compatible: " << boolToEnabled(options.versionCompatible) << std::endl << - "TensorRT runtime: " << options.useRuntime << std::endl << - "Lean DLL Path: " << options.leanDLLPath << std::endl << + "TensorRT runtime: " << options.useRuntime << std::endl << + "Lean DLL Path: " << options.leanDLLPath << std::endl << "Tempfile Controls: "; printTempfileControls(os, options.tempfileControls) << std::endl << "Exclude Lean Runtime: " << boolToEnabled(options.excludeLeanRuntime) << std::endl << "Sparsity: "; printSparsity(os, options) << std::endl << "Safe mode: " << boolToEnabled(options.safe) << std::endl << + "Build DLA standalone loadable: " << boolToEnabled(options.buildDLAStandalone) << std::endl << + "Allow GPU fallback for DLA: " << boolToEnabled(options.allowGPUFallback) << std::endl << "DirectIO mode: " << boolToEnabled(options.directIO) << std::endl << "Restricted mode: " << boolToEnabled(options.restricted) << std::endl << "Skip inference: " << boolToEnabled(options.skipInference) << std::endl << @@ -1837,8 +1853,7 @@ std::ostream& operator<<(std::ostream& os, const SystemOptions& options) os << "=== System Options ===" << std::endl << "Device: " << options.device << std::endl << - "DLACore: " << (options.DLACore != -1 ? std::to_string(options.DLACore) : "") << - (options.DLACore != -1 && options.fallback ? "(With GPU fallback)" : "") << std::endl; + "DLACore: " << (options.DLACore != -1 ? std::to_string(options.DLACore) : "") << std::endl; os << "Plugins:"; for (const auto& p : options.plugins) @@ -2053,6 +2068,7 @@ void BuildOptions::help(std::ostream& os) " --workspace=N Set workspace size in MiB." "\n" " --memPoolSize=poolspec Specify the size constraints of the designated memory pool(s) in MiB." "\n" " Note: Also accepts decimal sizes, e.g. 0.25MiB. Will be rounded down to the nearest integer bytes." "\n" + " In particular, for dlaSRAM the bytes will be rounded down to the nearest power of 2." "\n" R"( Pool constraint: poolspec ::= poolfmt[","poolspec])" "\n" " poolfmt ::= pool:sizeInMiB" "\n" R"( pool ::= "workspace"|"dlaSRAM"|"dlaLocalDRAM"|"dlaGlobalDRAM")" "\n" @@ -2116,7 +2132,13 @@ void BuildOptions::help(std::ostream& os) R"( layerDeviceTypePair ::= layerName":"deviceType)" "\n" R"( deviceType ::= "GPU"|"DLA")" "\n" " --calib= Read INT8 calibration cache file" "\n" - " --safe Enable build safety certified engine" "\n" + " --safe Enable build safety certified engine, if DLA is enable, --buildDLAStandalone will be specified" "\n" + " automatically (default = disabled)" "\n" + " --buildDLAStandalone Enable build DLA standalone loadable which can be loaded by cuDLA, when this option is enabled, " "\n" + " --allowGPUFallback is disallowed and --skipInference is enabled by default. 
Additionally, " "\n" + " specifying --inputIOFormats and --outputIOFormats restricts I/O data type and memory layout" "\n" + " (default = disabled)" "\n" + " --allowGPUFallback When DLA is enabled, allow GPU fallback for unsupported layers (default = disabled)" "\n" " --consistency Perform consistency checking on safety certified engine" "\n" " --restricted Enable safety scope checking with kSAFETY_SCOPE build flag" "\n" " --saveEngine= Save the serialized engine" "\n" @@ -2139,10 +2161,9 @@ void BuildOptions::help(std::ostream& os) R"( flag ::= "fasterDynamicShapes0805")" "\n" R"( |"disableExternalTacticSourcesForCore0805")" "\n" R"( |"profileSharing0806")" "\n" - " --builderOptimizationLevel Set the builder optimization level. (default is 3" "\n" + " --builderOptimizationLevel Set the builder optimization level. (default is 3)" "\n" " Higher level allows TensorRT to spend more building time for more optimization options." "\n" - " The default level is 3. Valid values include integers from 0 to the maximum optimization level," "\n" - " which is currently 5." "\n" + " Valid values include integers from 0 to the maximum optimization level, which is currently 5." "\n" " --hardwareCompatibilityLevel=mode Make the engine file compatible with other GPU architectures. (default = none)" "\n" R"( Hardware Compatibility Level: mode ::= "none" | "ampere+")" "\n" " none = no compatibility" "\n" @@ -2171,8 +2192,6 @@ void SystemOptions::help(std::ostream& os) os << "=== System Options ===" << std::endl << " --device=N Select cuda device N (default = " << defaultDevice << ")" << std::endl << " --useDLACore=N Select DLA core N for layers that support DLA (default = none)" << std::endl << - " --allowGPUFallback When DLA is enabled, allow GPU fallback for unsupported layers " - "(default = disabled)" << std::endl << " --staticPlugins Plugin library (.so) to load statically (can be specified multiple times)" << std::endl << " --dynamicPlugins Plugin library (.so) to load dynamically and may be serialized with the engine if they are included in --setPluginsToSerialize (can be specified multiple times)" << std::endl << " --setPluginsToSerialize Plugin library (.so) to be serialized with the engine (can be specified multiple times)" << std::endl << @@ -2205,6 +2224,7 @@ void InferenceOptions::help(std::ostream& os) << defaultWarmUp << ")" << std::endl << " --duration=N Run performance measurements for at least N seconds wallclock time (default = " << defaultDuration << ")" << std::endl << + " If -1 is specified, inference will keep running unless stopped manually" << std::endl << " --sleepTime=N Delay inference start with a gap of N milliseconds between launch and compute " "(default = " << defaultSleep << ")" << std::endl << " --idleTime=N Sleep N milliseconds between two continuous iterations" diff --git a/samples/common/sampleOptions.h b/samples/common/sampleOptions.h index 25a5c790..36fb6839 100644 --- a/samples/common/sampleOptions.h +++ b/samples/common/sampleOptions.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +37,7 @@ constexpr int32_t maxBatchNotProvided{0}; constexpr int32_t defaultMinTiming{1}; constexpr int32_t defaultAvgTiming{8}; constexpr int32_t defaultMaxAuxStreams{-1}; -constexpr int32_t defaultBuilderOptimizationLevel{3}; +constexpr int32_t defaultBuilderOptimizationLevel{-1}; // System default params constexpr int32_t defaultDevice{0}; @@ -206,6 +206,8 @@ class BuildOptions : public Options LayerOutputTypes layerOutputTypes; LayerDeviceTypes layerDeviceTypes; bool safe{false}; + bool buildDLAStandalone{false}; + bool allowGPUFallback{false}; bool consistency{false}; bool restricted{false}; bool skipInference{false}; @@ -249,7 +251,6 @@ class SystemOptions : public Options public: int32_t device{defaultDevice}; int32_t DLACore{-1}; - bool fallback{false}; bool ignoreParsedPluginLibs{false}; std::vector plugins; std::vector setPluginsToSerialize; diff --git a/samples/common/sampleReporting.cpp b/samples/common/sampleReporting.cpp index 0b3894c1..e7fc17f1 100644 --- a/samples/common/sampleReporting.cpp +++ b/samples/common/sampleReporting.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -181,7 +181,7 @@ PerformanceResult getPerformanceResult(std::vector const& timings PerformanceResult result; result.min = metricGetter(newTimings.front()); result.max = metricGetter(newTimings.back()); - result.mean = std::accumulate(newTimings.begin(), newTimings.end(), 0.0f, metricAccumulator) / newTimings.size(); + result.mean = std::accumulate(newTimings.begin(), newTimings.end(), 0.0F, metricAccumulator) / newTimings.size(); result.median = findMedian(newTimings, metricGetter); for (auto percentile : percentiles) { @@ -364,17 +364,14 @@ void Profiler::reportLayerTime(char const* layerName, float timeMs) noexcept void Profiler::print(std::ostream& os) const noexcept { - std::string const nameHdr("Layer"); - std::string const timeHdr(" Time (ms)"); - std::string const avgHdr(" Avg. 
Time (ms)"); - std::string const medHdr(" Median Time (ms)"); - std::string const percentageHdr(" Time %"); + std::string const nameHdr(" Layer"); + std::string const timeHdr(" Time(ms)"); + std::string const avgHdr(" Avg.(ms)"); + std::string const medHdr(" Median(ms)"); + std::string const percentageHdr(" Time(%)"); float const totalTimeMs = getTotalTime(); - auto const cmpLayer = [](LayerProfile const& a, LayerProfile const& b) { return a.name.size() < b.name.size(); }; - auto const longestName = std::max_element(mLayers.begin(), mLayers.end(), cmpLayer); - auto const nameLength = std::max(longestName->name.size() + 1, nameHdr.size()); auto const timeLength = timeHdr.size(); auto const avgLength = avgHdr.size(); auto const medLength = medHdr.size(); @@ -382,7 +379,7 @@ void Profiler::print(std::ostream& os) const noexcept os << std::endl << "=== Profile (" << mUpdatesCount << " iterations ) ===" << std::endl - << std::setw(nameLength) << nameHdr << timeHdr << avgHdr << medHdr << percentageHdr << std::endl; + << timeHdr << avgHdr << medHdr << percentageHdr << nameHdr << std::endl; for (auto const& p : mLayers) { @@ -392,17 +389,18 @@ void Profiler::print(std::ostream& os) const noexcept continue; } // clang-format off - os << std::setw(nameLength) << p.name << std::setw(timeLength) << std::fixed << std::setprecision(2) << getTotalTime(p) + os << std::setw(timeLength) << std::fixed << std::setprecision(2) << getTotalTime(p) << std::setw(avgLength) << std::fixed << std::setprecision(4) << getAvgTime(p) << std::setw(medLength) << std::fixed << std::setprecision(4) << getMedianTime(p) << std::setw(percentageLength) << std::fixed << std::setprecision(1) << getTotalTime(p) / totalTimeMs * 100 - << std::endl; + << " " << p.name << std::endl; } { - os << std::setw(nameLength) << "Total" << std::setw(timeLength) << std::fixed << std::setprecision(2) + os << std::setw(timeLength) << std::fixed << std::setprecision(2) << totalTimeMs << std::setw(avgLength) << std::fixed << std::setprecision(4) << totalTimeMs / mUpdatesCount << std::setw(medLength) << std::fixed << std::setprecision(4) << getMedianTime() - << std::setw(percentageLength) << std::fixed << std::setprecision(1) << 100.0 << std::endl; + << std::setw(percentageLength) << std::fixed << std::setprecision(1) << 100.0 + << " Total" << std::endl; // clang-format on } os << std::endl; diff --git a/samples/common/sampleReporting.h b/samples/common/sampleReporting.h index 186f8f06..fa0d706d 100644 --- a/samples/common/sampleReporting.h +++ b/samples/common/sampleReporting.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/sampleUtils.cpp b/samples/common/sampleUtils.cpp index 97646c24..93aeb69d 100644 --- a/samples/common/sampleUtils.cpp +++ b/samples/common/sampleUtils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -383,6 +383,11 @@ void print(std::ostream& os, int8_t v) os << static_cast(v); } +void print(std::ostream& os, __half v) +{ + os << static_cast(v); +} + template void dumpBuffer(void const* buffer, std::string const& separator, std::ostream& os, Dims const& dims, Dims const& strides, int32_t vectorDim, int32_t spv) diff --git a/samples/common/sampleUtils.h b/samples/common/sampleUtils.h index 67efa86a..618c2782 100644 --- a/samples/common/sampleUtils.h +++ b/samples/common/sampleUtils.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/common.py b/samples/python/common.py index 36c78eee..76e2b17b 100644 --- a/samples/python/common.py +++ b/samples/python/common.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/detectron2/build_engine.py b/samples/python/detectron2/build_engine.py index 6b970c4d..42de4df0 100644 --- a/samples/python/detectron2/build_engine.py +++ b/samples/python/detectron2/build_engine.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/detectron2/create_onnx.py b/samples/python/detectron2/create_onnx.py index 5b25b2f6..38538464 100644 --- a/samples/python/detectron2/create_onnx.py +++ b/samples/python/detectron2/create_onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/detectron2/eval_coco.py b/samples/python/detectron2/eval_coco.py index 6b890632..828413d4 100644 --- a/samples/python/detectron2/eval_coco.py +++ b/samples/python/detectron2/eval_coco.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -75,7 +75,7 @@ def main(args): pred_boxes = [] scores = [] pred_classes = [] - # Number of detections. + # Number of detections. num_instances = len(detections[i]) # Reserve numpy array to hold all mask predictions per image. pred_masks = np.empty((num_instances, 28, 28), dtype=np.float32) @@ -84,7 +84,7 @@ def main(args): # Loop over every single detection. for n in range(num_instances): det = detections[i][n] - # Append box coordinates data. 
+ # Append box coordinates data. pred_boxes.append([det['ymin'], det['xmin'], det['ymax'], det['xmax']]) # Append score. scores.append(det['score']) @@ -105,21 +105,21 @@ def main(args): image_dict = [{'instances': instances}] input_dict = [{'image_id': source_id}] evaluator.process(input_dict, image_dict) - + # Final evaluations, generation of mAP accuracy performance. evaluator.evaluate() - + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with.") parser.add_argument("-i", "--input", - help="The input to infer, either a single image path, or a directory of images.") + help="The input to infer, either a single image path, or a directory of images.") parser.add_argument("-c", "--det2_config", help="The Detectron 2 config file (.yaml) for the model", type=str) - parser.add_argument("-w", "--det2_weights", help="The Detectron 2 model weights (.pkl)", type=str) - parser.add_argument("-t", "--nms_threshold", type=float, + parser.add_argument("-w", "--det2_weights", help="The Detectron 2 model weights (.pkl)", type=str) + parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS operation, if higher than the threshold in the engine.") - parser.add_argument("--iou_threshold", default=0.5, type=float, + parser.add_argument("--iou_threshold", default=0.5, type=float, help="Select the IoU threshold for the mask segmentation. Range is 0 to 1. Pixel values more than threshold will become 1, less 0.") args = parser.parse_args() if not all([args.engine, args.input, args.det2_config, args.det2_weights]): diff --git a/samples/python/detectron2/image_batcher.py b/samples/python/detectron2/image_batcher.py index 7aa9af6d..35d8c8e1 100644 --- a/samples/python/detectron2/image_batcher.py +++ b/samples/python/detectron2/image_batcher.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -144,7 +144,7 @@ def resize_pad(image, pad_color=(0, 0, 0)): # Get characteristics. width, height = image.size - + # Replicates behavior of ResizeShortestEdge augmentation. size = self.min_size_test * 1.0 pre_scale = size / min(height, width) @@ -163,7 +163,7 @@ def resize_pad(image, pad_color=(0, 0, 0)): neww = int(neww + 0.5) newh = int(newh + 0.5) - # Scaling factor for normalized box coordinates scaling in post-processing. + # Scaling factor for normalized box coordinates scaling in post-processing. scaling = max(newh/height, neww/width) # Padding. @@ -177,7 +177,7 @@ def resize_pad(image, pad_color=(0, 0, 0)): image = Image.open(image_path) image = image.convert(mode='RGB') # Pad with mean values of COCO dataset, since padding is applied before actual model's - # preprocessor steps (Sub, Div ops), we need to pad with mean values in order to reverse + # preprocessor steps (Sub, Div ops), we need to pad with mean values in order to reverse # the effects of Sub and Div, so that padding after model's preprocessor will be with actual 0s. 
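A minimal sketch of the arithmetic behind the mean-value padding performed in resize_pad above, assuming the model's preprocessor Sub step uses per-channel means close to the (124, 116, 104) pad color (the exact mean values are an assumption here, not taken from the patch):

import numpy as np

# Assumed approximate per-channel (RGB) means subtracted by the model's Sub op.
pixel_mean = np.array([124.0, 116.0, 104.0], dtype=np.float32)

# Padding the border with the same values makes it neutral: after the Sub op the
# padded pixels become ~0, and a following Div (by the per-channel std) keeps
# them at 0, since 0 / std == 0.
pad_color = pixel_mean.copy()
print(pad_color - pixel_mean)  # -> [0. 0. 0.]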
image, scale = resize_pad(image, (124, 116, 104)) image = np.asarray(image, dtype=np.float32) diff --git a/samples/python/detectron2/infer.py b/samples/python/detectron2/infer.py index a91d512d..aae41435 100644 --- a/samples/python/detectron2/infer.py +++ b/samples/python/detectron2/infer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/detectron2/onnx_utils.py b/samples/python/detectron2/onnx_utils.py index ae4c3b02..56d280fa 100644 --- a/samples/python/detectron2/onnx_utils.py +++ b/samples/python/detectron2/onnx_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -119,8 +119,8 @@ def gather(self, name, data, indices, axes=0): :param self: The gs.Graph object being extended. :param name: The name to use for the node. :param data: Data from which to gather specific tensors. - :param indices: Indices by which to gather data tensors. - :param axes: A list of axes on which to perform gather operation + :param indices: Indices by which to gather data tensors. + :param axes: A list of axes on which to perform gather operation """ data_tensor = data if type(data) is gs.Variable else data[0] indices_tensor = indices if type(indices) is gs.Variable else indices[0] @@ -217,7 +217,7 @@ def find_node_by_op_input_output_name(self, op, input_name, output_name, input_p @gs.Graph.register() def find_descendant_by_op(self, node, op, depth=10): """ - Starting from the given node, finds a node lower in the graph matching the given operation name. + Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an exhaustive graph search. In order to graph search bfs is used, so runtime complexity is O(V+E). :param self: The gs.Graph object being extended. @@ -242,7 +242,7 @@ def find_ancestor_by_op(self, node, op, depth=10): """ Starting from the given node, finds a node higher in the graph matching the given operation name. This is not an exhaustive graph search. - In order to graph search bfs is used, so runtime complexity is O(V+E). + In order to graph search bfs is used, so runtime complexity is O(V+E). :param self: The gs.Graph object being extended. :param node: The node to start searching from. :param op: The operation name to search for. 
@@ -258,4 +258,4 @@ def find_ancestor_by_op(self, node, op, depth=10): return node for child in node.inputs[-1].inputs: queue.append(child) - return None \ No newline at end of file + return None diff --git a/samples/python/detectron2/requirements.txt b/samples/python/detectron2/requirements.txt index 7dca0535..d7361093 100644 --- a/samples/python/detectron2/requirements.txt +++ b/samples/python/detectron2/requirements.txt @@ -1,5 +1,7 @@ -onnx==1.8.1 -onnxruntime==1.8.0 +onnx==1.9.0; python_version<"3.8" +onnx==1.12.0; python_version>="3.8" +onnxruntime==1.8.1; python_version<"3.8" +onnxruntime==1.12.1; python_version>="3.8" Pillow git+https://github.com/facebookresearch/detectron2.git git+https://github.com/NVIDIA/TensorRT#subdirectory=tools/onnx-graphsurgeon diff --git a/samples/python/detectron2/visualize.py b/samples/python/detectron2/visualize.py index a7a0e780..546d7669 100644 --- a/samples/python/detectron2/visualize.py +++ b/samples/python/detectron2/visualize.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -69,7 +69,7 @@ def visualize_detections(image_path, output_path, detections, labels=[], iou_thr det_width = round(d['xmax'] - d['xmin']) det_height = round(d['ymax'] - d['ymin']) # Slight scaling, to get binary masks after float32 -> uint8 - # conversion, if not scaled all pixels are zero. + # conversion, if not scaled all pixels are zero. mask = d['mask'] > iou_threshold # Convert float32 -> uint8. mask = mask.astype(np.uint8) diff --git a/samples/python/downloader.py b/samples/python/downloader.py index 80e007b3..b4a436e2 100755 --- a/samples/python/downloader.py +++ b/samples/python/downloader.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/build_engine.py b/samples/python/efficientdet/build_engine.py index 3cf76c6e..638c604f 100644 --- a/samples/python/efficientdet/build_engine.py +++ b/samples/python/efficientdet/build_engine.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/compare_tf.py b/samples/python/efficientdet/compare_tf.py index 21413a5c..54c356cd 100644 --- a/samples/python/efficientdet/compare_tf.py +++ b/samples/python/efficientdet/compare_tf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/create_onnx.py b/samples/python/efficientdet/create_onnx.py index 0c66620c..17fee5f6 100644 --- a/samples/python/efficientdet/create_onnx.py +++ b/samples/python/efficientdet/create_onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/eval_coco.py b/samples/python/efficientdet/eval_coco.py index ad15918d..966f49be 100644 --- a/samples/python/efficientdet/eval_coco.py +++ b/samples/python/efficientdet/eval_coco.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/image_batcher.py b/samples/python/efficientdet/image_batcher.py index 82756447..e519a5db 100644 --- a/samples/python/efficientdet/image_batcher.py +++ b/samples/python/efficientdet/image_batcher.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/infer.py b/samples/python/efficientdet/infer.py index 8abe69ff..409738ac 100644 --- a/samples/python/efficientdet/infer.py +++ b/samples/python/efficientdet/infer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/infer_tf.py b/samples/python/efficientdet/infer_tf.py index 7f9c91b0..a02f87ee 100644 --- a/samples/python/efficientdet/infer_tf.py +++ b/samples/python/efficientdet/infer_tf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/onnx_utils.py b/samples/python/efficientdet/onnx_utils.py index 7155b171..e55f7e11 100644 --- a/samples/python/efficientdet/onnx_utils.py +++ b/samples/python/efficientdet/onnx_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientdet/visualize.py b/samples/python/efficientdet/visualize.py index c09b0bc6..742ecbce 100644 --- a/samples/python/efficientdet/visualize.py +++ b/samples/python/efficientdet/visualize.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/build_engine.py b/samples/python/efficientnet/build_engine.py index e71222e9..2391aca0 100644 --- a/samples/python/efficientnet/build_engine.py +++ b/samples/python/efficientnet/build_engine.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/compare_tf.py b/samples/python/efficientnet/compare_tf.py index c204ccfe..6d9ad88f 100644 --- a/samples/python/efficientnet/compare_tf.py +++ b/samples/python/efficientnet/compare_tf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/create_onnx.py b/samples/python/efficientnet/create_onnx.py index 71dd6595..b98fd137 100644 --- a/samples/python/efficientnet/create_onnx.py +++ b/samples/python/efficientnet/create_onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/eval_gt.py b/samples/python/efficientnet/eval_gt.py index f2b67627..88198b89 100644 --- a/samples/python/efficientnet/eval_gt.py +++ b/samples/python/efficientnet/eval_gt.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/image_batcher.py b/samples/python/efficientnet/image_batcher.py index a23ff136..996a72a3 100644 --- a/samples/python/efficientnet/image_batcher.py +++ b/samples/python/efficientnet/image_batcher.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/efficientnet/infer.py b/samples/python/efficientnet/infer.py index 8543a87c..46c11253 100644 --- a/samples/python/efficientnet/infer.py +++ b/samples/python/efficientnet/infer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/engine_refit_onnx_bidaf/build_and_refit_engine.py b/samples/python/engine_refit_onnx_bidaf/build_and_refit_engine.py index f397a2c6..268a5cf5 100644 --- a/samples/python/engine_refit_onnx_bidaf/build_and_refit_engine.py +++ b/samples/python/engine_refit_onnx_bidaf/build_and_refit_engine.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/engine_refit_onnx_bidaf/data_processing.py b/samples/python/engine_refit_onnx_bidaf/data_processing.py index bdde98c2..6eb90fa0 100644 --- a/samples/python/engine_refit_onnx_bidaf/data_processing.py +++ b/samples/python/engine_refit_onnx_bidaf/data_processing.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/engine_refit_onnx_bidaf/prepare_model.py b/samples/python/engine_refit_onnx_bidaf/prepare_model.py index 11b1cf2e..cbeb6a92 100644 --- a/samples/python/engine_refit_onnx_bidaf/prepare_model.py +++ b/samples/python/engine_refit_onnx_bidaf/prepare_model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/introductory_parser_samples/onnx_resnet50.py b/samples/python/introductory_parser_samples/onnx_resnet50.py index 8b980ec0..e7e845b6 100644 --- a/samples/python/introductory_parser_samples/onnx_resnet50.py +++ b/samples/python/introductory_parser_samples/onnx_resnet50.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/network_api_pytorch_mnist/model.py b/samples/python/network_api_pytorch_mnist/model.py index 9be0d11e..654532bf 100644 --- a/samples/python/network_api_pytorch_mnist/model.py +++ b/samples/python/network_api_pytorch_mnist/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/network_api_pytorch_mnist/sample.py b/samples/python/network_api_pytorch_mnist/sample.py index f8126a40..7c0a6417 100644 --- a/samples/python/network_api_pytorch_mnist/sample.py +++ b/samples/python/network_api_pytorch_mnist/sample.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_custom_plugin/CMakeLists.txt b/samples/python/onnx_custom_plugin/CMakeLists.txt index 09363eaf..a7ad8933 100644 --- a/samples/python/onnx_custom_plugin/CMakeLists.txt +++ b/samples/python/onnx_custom_plugin/CMakeLists.txt @@ -1,3 +1,20 @@ +# +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + # We need cmake >= 3.8, since 3.8 introduced CUDA as a first class language cmake_minimum_required(VERSION 3.8 FATAL_ERROR) project(CustomHardMax LANGUAGES CXX CUDA) diff --git a/samples/python/onnx_custom_plugin/load_plugin_lib.py b/samples/python/onnx_custom_plugin/load_plugin_lib.py index 4e2c3946..0a85f18e 100644 --- a/samples/python/onnx_custom_plugin/load_plugin_lib.py +++ b/samples/python/onnx_custom_plugin/load_plugin_lib.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_custom_plugin/model.py b/samples/python/onnx_custom_plugin/model.py index 3d29b74a..cde029c5 100644 --- a/samples/python/onnx_custom_plugin/model.py +++ b/samples/python/onnx_custom_plugin/model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -52,7 +52,7 @@ def _do_graph_surgery(raw_model_path, trt_model_path): compress_node = node_by_name['Compress_31'] einsum_node = gs.Node( - 'Einsum', + 'Einsum', 'Dot_of_Hardmax_and_Transpose', attrs={'equation': 'ij,ij->i'}, # "Dot product" of 2d tensors inputs=[hardmax_node.outputs[0], transpose_node.outputs[0]], diff --git a/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.cpp b/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.cpp index 125d9577..4900292b 100644 --- a/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.cpp +++ b/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.h b/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.h index c98e70f9..250291d5 100644 --- a/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.h +++ b/samples/python/onnx_custom_plugin/plugin/customHardmaxPlugin.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_custom_plugin/sample.py b/samples/python/onnx_custom_plugin/sample.py index c2a3579a..65b1991f 100644 --- a/samples/python/onnx_custom_plugin/sample.py +++ b/samples/python/onnx_custom_plugin/sample.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_custom_plugin/test_custom_hardmax_plugin.py b/samples/python/onnx_custom_plugin/test_custom_hardmax_plugin.py index 5fac3d60..dc919b72 100644 --- a/samples/python/onnx_custom_plugin/test_custom_hardmax_plugin.py +++ b/samples/python/onnx_custom_plugin/test_custom_hardmax_plugin.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_packnet/convert_to_onnx.py b/samples/python/onnx_packnet/convert_to_onnx.py index be07ae64..df604f96 100644 --- a/samples/python/onnx_packnet/convert_to_onnx.py +++ b/samples/python/onnx_packnet/convert_to_onnx.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/onnx_packnet/post_processing.py b/samples/python/onnx_packnet/post_processing.py index 5dad482f..fd101b45 100644 --- a/samples/python/onnx_packnet/post_processing.py +++ b/samples/python/onnx_packnet/post_processing.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/scripts/download_mnist_data.sh b/samples/python/scripts/download_mnist_data.sh index 9572a805..809bcbc9 100755 --- a/samples/python/scripts/download_mnist_data.sh +++ b/samples/python/scripts/download_mnist_data.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/scripts/download_mnist_pgms.py b/samples/python/scripts/download_mnist_pgms.py index 19bb6ca2..a1ee0cba 100644 --- a/samples/python/scripts/download_mnist_pgms.py +++ b/samples/python/scripts/download_mnist_pgms.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -66,4 +66,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/samples/python/tensorflow_object_detection_api/build_engine.py b/samples/python/tensorflow_object_detection_api/build_engine.py index a7945a52..2efd6599 100644 --- a/samples/python/tensorflow_object_detection_api/build_engine.py +++ b/samples/python/tensorflow_object_detection_api/build_engine.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/tensorflow_object_detection_api/compare_tf.py b/samples/python/tensorflow_object_detection_api/compare_tf.py index 8d805f3a..409aec6b 100644 --- a/samples/python/tensorflow_object_detection_api/compare_tf.py +++ b/samples/python/tensorflow_object_detection_api/compare_tf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -109,13 +109,13 @@ def infer(self, batch, scales=None, nms_threshold=None): # Depending on detection type you need slightly different data. 
if self.detection_type == 'bbox': mask = None - # Segmentation is only supported with Mask R-CNN, which has + # Segmentation is only supported with Mask R-CNN, which has # fixed_shape_resizer as image_resizer (lookup pipeline.config) elif self.detection_type == 'segmentation': # Select a mask mask = masks[0][n] # Slight scaling, to get binary masks after float32 -> uint8 - # conversion, if not scaled all pixels are zero. + # conversion, if not scaled all pixels are zero. mask = mask > self.iou_threshold # Convert float32 -> uint8. mask = mask.astype(np.uint8) @@ -131,7 +131,7 @@ def infer(self, batch, scales=None, nms_threshold=None): scale_x = scale if nms_threshold and scores[0][n] < nms_threshold: continue - # Append to detections + # Append to detections detections[0].append({ 'ymin': boxes[0][n][0] * scale_y, 'xmin': boxes[0][n][1] * scale_x, @@ -169,7 +169,7 @@ def parse_annotations(annotations_path, detection_type): # Depending on detection type you need slightly different data. if detection_type == 'bbox': mask = None - # Segmentation is only supported with Mask R-CNN, which has + # Segmentation is only supported with Mask R-CNN, which has # fixed_shape_resizer as image_resizer (lookup pipeline.config) elif detection_type == 'segmentation': # Get np.array segmentation mask from annotation @@ -270,4 +270,4 @@ def main(args): if not all([args.engine, args.saved_model, args.input, args.output, args.preprocessor]): parser.print_help() sys.exit(1) - main(args) \ No newline at end of file + main(args) diff --git a/samples/python/tensorflow_object_detection_api/create_onnx.py b/samples/python/tensorflow_object_detection_api/create_onnx.py index 3ecb1b93..919cc8e6 100644 --- a/samples/python/tensorflow_object_detection_api/create_onnx.py +++ b/samples/python/tensorflow_object_detection_api/create_onnx.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -69,12 +69,12 @@ def __init__(self, saved_model_path, pipeline_config_path): # Fold constants via ONNX-GS that TF2ONNX may have missed. self.graph.fold_constants() - + # Pipeline config parsing. pipeline_config = config_util.get_configs_from_pipeline_file(pipeline_config_path) # Get input resolution. self.height, self.width = config_util.get_spatial_image_size(config_util.get_image_resizer_config(pipeline_config["model"])) - + # If your model is SSD, get characteristics accordingly from pipeline.config file. if pipeline_config["model"].HasField("ssd"): # Getting model characteristics. @@ -83,11 +83,11 @@ def __init__(self, saved_model_path, pipeline_config_path): self.first_stage_nms_iou_threshold = float(pipeline_config["model"].ssd.post_processing.batch_non_max_suppression.iou_threshold) self.first_stage_max_proposals = int(pipeline_config["model"].ssd.post_processing.batch_non_max_suppression.max_detections_per_class) # If your model is Faster R-CNN get it's characteristics from pipeline.config file. - elif pipeline_config["model"].HasField("faster_rcnn"): + elif pipeline_config["model"].HasField("faster_rcnn"): # Getting model characteristics. 
- self.model = str(pipeline_config["model"].faster_rcnn.feature_extractor.type) + self.model = str(pipeline_config["model"].faster_rcnn.feature_extractor.type) self.num_classes = pipeline_config["model"].faster_rcnn.num_classes - self.first_stage_nms_score_threshold = float(pipeline_config["model"].faster_rcnn.first_stage_nms_score_threshold) + self.first_stage_nms_score_threshold = float(pipeline_config["model"].faster_rcnn.first_stage_nms_score_threshold) self.first_stage_nms_iou_threshold = float(pipeline_config["model"].faster_rcnn.first_stage_nms_iou_threshold) self.first_stage_max_proposals = int(pipeline_config["model"].faster_rcnn.first_stage_max_proposals) self.first_stage_crop_size = int(pipeline_config["model"].faster_rcnn.initial_crop_size) @@ -101,15 +101,15 @@ def __init__(self, saved_model_path, pipeline_config_path): self.matmul_crop_and_resize = pipeline_config["model"].faster_rcnn.use_matmul_crop_and_resize # If model is Mask R-CNN, get final instance segmentation masks resolution. if pipeline_config["model"].faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor.HasField("mask_height") and pipeline_config["model"].faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor.HasField("mask_width"): - self.mask_height = int(pipeline_config["model"].faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor.mask_height) + self.mask_height = int(pipeline_config["model"].faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor.mask_height) self.mask_width = int(pipeline_config["model"].faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor.mask_width) - else: + else: log.info("Given Model type is not supported") sys.exit(1) # List of supported models. - supported_models = ["ssd_mobilenet_v2_keras", "ssd_mobilenet_v1_fpn_keras", "ssd_mobilenet_v2_fpn_keras", "ssd_resnet50_v1_fpn_keras", - "ssd_resnet101_v1_fpn_keras", "ssd_resnet152_v1_fpn_keras", "faster_rcnn_resnet50_keras", "faster_rcnn_resnet101_keras", + supported_models = ["ssd_mobilenet_v2_keras", "ssd_mobilenet_v1_fpn_keras", "ssd_mobilenet_v2_fpn_keras", "ssd_resnet50_v1_fpn_keras", + "ssd_resnet101_v1_fpn_keras", "ssd_resnet152_v1_fpn_keras", "faster_rcnn_resnet50_keras", "faster_rcnn_resnet101_keras", "faster_rcnn_resnet152_keras", "faster_rcnn_inception_resnet_v2_keras"] assert self.model in supported_models @@ -129,7 +129,7 @@ def __init__(self, saved_model_path, pipeline_config_path): if not (self.mask_height is None and self.mask_width is None): log.info("Mask height is {}".format(self.mask_height)) log.info("Mask width is {}".format(self.mask_width)) - + self.batch_size = None def sanitize(self): @@ -182,7 +182,7 @@ def save(self, output_path): def add_debug_output(self, debug): """ - Add a debug output to a given node. + Add a debug output to a given node. :param debug: Name of the output you would like to debug. """ tensors = self.graph.tensors() @@ -239,7 +239,7 @@ def update_preprocessor(self, batch_size, input_format): elif 'resnet' in self.model: sub_const = np.expand_dims(np.asarray([255 * 0.485, 255 * 0.456, 255 * 0.406], dtype=np.float32), axis=(0, 2, 3)) sub_out = self.graph.op_with_const("Sub", "preprocessor/mean", input_tensor, sub_const) - + # Backbone is not supported. 
else: log.info("Given model's backbone is not supported, pre-processor algorithm can't be generated") @@ -266,7 +266,7 @@ def update_preprocessor(self, batch_size, input_format): self.sanitize() def find_head_end(self, head_name, descendant, end_op): - # This helper function finds ends of Class Net and Box Net, based on a model type. + # This helper function finds ends of Class Net and Box Net, based on a model type. # :param head_name: This is a common name that nodes in either Class or Box Nets start with. # :param descendant: Descendant of head_name, identified by operation (Transpose, MatMul, etc.). # :param end_op: Operation of a node you would like to get in the end of each Net. @@ -297,8 +297,8 @@ def get_anchor(output_idx, op, depth=5): node = self.graph.find_descendant_by_op(split.o(0, output_idx), op) for i in range(depth): if node.op == op: - # Input of size 1 is not anchor data - if (node.inputs[1].values).size == 1: + # Input of size 1 is not anchor data + if (node.inputs[1].values).size == 1: node = node.o() # Find the node that with anchor data, multielement input elif (node.inputs[1].values).size > 1: @@ -308,7 +308,7 @@ def get_anchor(output_idx, op, depth=5): else: node = node.o() return None - + anchors_y = get_anchor(0, "Add") anchors_x = get_anchor(1, "Add") anchors_h = get_anchor(2, "Mul") @@ -320,19 +320,19 @@ def get_anchor(output_idx, op, depth=5): # Trim total number of anchors in order to not have copies introduced by growing number of batch_size. anchors = batched_anchors[0:num_anchors,0:num_anchors] return gs.Constant(name="nms/anchors:0", values=anchors) - + def NMS(self, box_net_tensor, class_net_tensor, anchors_tensor, background_class, score_activation, iou_threshold, nms_score_threshold, user_threshold, nms_name=None): - # Helper function to create the NMS Plugin node with the selected inputs. + # Helper function to create the NMS Plugin node with the selected inputs. # EfficientNMS_TRT TensorRT Plugin is suitable for our use case. - # :param box_net_tensor: The box predictions from the Box Net. + # :param box_net_tensor: The box predictions from the Box Net. # :param class_net_tensor: The class predictions from the Class Net. # :param anchors_tensor: The default anchor coordinates (from the extracted anchor constants) # :param background_class: The label ID for the background class. - # :param score_activation: If set to True - apply sigmoid activation to the confidence scores during NMS operation, + # :param score_activation: If set to True - apply sigmoid activation to the confidence scores during NMS operation, # if false - no activation, pass one from the graph. # :param iou_threshold: NMS intersection over union threshold, given by pipeline.config. # :param nms_score_threshold: NMS score threshold, given by pipeline.config. - # :param user_threshold: User's given threshold to overwrite default NMS score threshold. + # :param user_threshold: User's given threshold to overwrite default NMS score threshold. # :param nms_name: Name of NMS node in a graph, renames NMS elements accordingly in order to eliminate cycles. 
if nms_name is None: @@ -369,24 +369,24 @@ def NMS(self, box_net_tensor, class_net_tensor, anchors_tensor, background_class 'score_activation': score_activation, 'class_agnostic': False, 'box_coding': 1, - } + } ) log.info("Created 'nms/non_maximum_suppression{}' NMS plugin".format(nms_name)) return nms_outputs def CropAndResize(self, unsqeeze_input, relu_node_outputs, cnr_num): - # Helper function to create the NMS Plugin node with the selected inputs. + # Helper function to create the NMS Plugin node with the selected inputs. # CropAndResize TensorRT Plugin is suitable for our use case. - # :param unsqeeze_input: NMS's bonding boxes output, clipped and normalized if this is first CropAndResize, this is a souce of rois for CropAndResize. + # :param unsqeeze_input: NMS's bonding boxes output, clipped and normalized if this is first CropAndResize, this is a souce of rois for CropAndResize. # :param relu_node_outputs: 1st backbone's last Relu node, this is a souce of feature_maps for CropAndResize - # :param cnr_num: Positional number of CropAndResize node in a graph, renames CropAndResize elements accordingly in order to eliminate cycles. - + # :param cnr_num: Positional number of CropAndResize node in a graph, renames CropAndResize elements accordingly in order to eliminate cycles. + # CropAndResizePlugin requires 4th dimension of 1: [N, B, 4, 1], so - # we need to add unsqeeze node to make tensor 4 dimensional. + # we need to add unsqeeze node to make tensor 4 dimensional. unsqueeze_node = self.graph.unsqueeze("CNR/detection_boxes_unsqueeze_"+cnr_num, unsqeeze_input) - # CropAndResizePlugin's inputs + # CropAndResizePlugin's inputs feature_maps = relu_node_outputs rois = unsqueeze_node[0] @@ -394,7 +394,7 @@ def CropAndResize(self, unsqeeze_input, relu_node_outputs, cnr_num): cnr_pfmap = gs.Variable(name="cnr/pfmap_"+cnr_num, dtype=np.float32, shape=[self.batch_size, self.first_stage_max_proposals, feature_maps.shape[1], self.first_stage_crop_size, self.first_stage_crop_size]) - # Create the CropandResize Plugin node with the selected inputs. + # Create the CropandResize Plugin node with the selected inputs. # Two inputs are given to the CropAndResize TensorRT node: # - The feature_maps (from the relu_node_outputs): [batch_size, channel_num, height, width] # - The rois (clipped and normalized detection boxes resulting from NMS): [batch_size, featuremap, 4, 1] @@ -415,7 +415,7 @@ def CropAndResize(self, unsqeeze_input, relu_node_outputs, cnr_num): reshape_node = self.graph.op_with_const("Reshape", "cnr/reshape_"+cnr_num, cnr_pfmap, reshape_shape) return reshape_node[0] - + def process_graph(self, first_nms_threshold=None, second_nms_threshold=None): """ Processes the graph to replace the NMS operations by EfficientNMS_TRT TensorRT plugin nodes and @@ -434,7 +434,7 @@ def first_nms(background_class, score_activation, first_nms_threshold, nms_name= # Supported models ssd_models = ['ssd_mobilenet_v1_fpn_keras', 'ssd_mobilenet_v2_fpn_keras', 'ssd_resnet50_v1_fpn_keras', 'ssd_resnet101_v1_fpn_keras', 'ssd_resnet152_v1_fpn_keras'] frcnn_models = ['faster_rcnn_resnet50_keras', 'faster_rcnn_resnet101_keras', 'faster_rcnn_resnet152_keras', 'faster_rcnn_inception_resnet_v2_keras'] - + # Getting SSD's Class and Box Nets final tensors. if "ssd" in self.model: # Find the concat node at the end of the class net (multi-scale class predictor). 
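For orientation, a rough stand-alone sketch of how a node like the "nms/non_maximum_suppression" plugin node described above can be attached with onnx-graphsurgeon. The helper name, tensor names, shapes and default thresholds below are illustrative placeholders rather than the patch's own code; only the operator name and the attribute keys mirror what the NMS() helper above sets.

import numpy as np
import onnx_graphsurgeon as gs

def attach_efficient_nms(graph, boxes, scores, anchors, batch_size,
                         max_det=100, iou_threshold=0.5, score_threshold=0.3):
    # Placeholder output tensors; shapes follow the usual EfficientNMS_TRT layout.
    outputs = [
        gs.Variable("num_detections", dtype=np.int32, shape=[batch_size, 1]),
        gs.Variable("detection_boxes", dtype=np.float32, shape=[batch_size, max_det, 4]),
        gs.Variable("detection_scores", dtype=np.float32, shape=[batch_size, max_det]),
        gs.Variable("detection_classes", dtype=np.int32, shape=[batch_size, max_det]),
    ]
    # boxes/scores/anchors correspond to the box_net_tensor, class_net_tensor and
    # anchors tensor discussed above.
    graph.nodes.append(gs.Node(
        op="EfficientNMS_TRT",
        name="nms/non_maximum_suppression",
        attrs={
            "plugin_version": "1",
            "background_class": -1,        # -1: no background class to skip
            "max_output_boxes": max_det,
            "score_threshold": score_threshold,
            "iou_threshold": iou_threshold,
            "score_activation": True,      # apply sigmoid to raw class scores inside the plugin
            "class_agnostic": False,
            "box_coding": 1,               # CenterSize box coding, as in the helper above
        },
        inputs=[boxes, scores, anchors],
        outputs=outputs,
    ))
    return outputs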
@@ -456,7 +456,7 @@ def first_nms(background_class, score_activation, first_nms_threshold, nms_name= variance_adj = np.expand_dims(np.asarray([0.1, 0.1, 0.2, 0.2], dtype=np.float32), axis=(0, 1)) # Final Box Net tensor. box_net_tensor = self.graph.op_with_const("Mul", box_net_head_name+"/scale", box_net_output, variance_adj)[0] - + # Getting Faster R-CNN's 1st Class and Box Nets tensors. elif "faster_rcnn" in self.model: # Identify Class Net and Box Net head names @@ -465,7 +465,7 @@ def first_nms(background_class, score_activation, first_nms_threshold, nms_name= # Find the softmax node at the end of the class net (multi-scale class predictor). class_net = self.find_head_end(head_names[0], "Transpose", "Softmax") # Final Class Net tensor - class_net_tensor = class_net.outputs[0] + class_net_tensor = class_net.outputs[0] # Find the reshape node at the end of the box net (multi-scale localization predictor). box_net = self.find_head_end(head_names[1], "Transpose", "Reshape") @@ -495,7 +495,7 @@ def first_nms(background_class, score_activation, first_nms_threshold, nms_name= def first_cnr(input): """ Updates the graph to replace the 1st cropAndResize op by CropAndResize TensorRT plugin node. - :param input: Input tensor is the output from previous first_nms() step. + :param input: Input tensor is the output from previous first_nms() step. """ # Locate the last Relu node of the first backbone (pre 1st NMS). Relu node contains feature maps @@ -511,7 +511,7 @@ def first_cnr(input): div_out = self.graph.op_with_const("Div", "FirstNMS/detection_boxes_normalizer", clip_out[0], div_const) # Linear transformation to convert box coordinates from (TopLeft, BottomRight) Corner encoding - # to CenterSize encoding. 1st NMS boxes are multiplied by transformation matrix in order to + # to CenterSize encoding. 1st NMS boxes are multiplied by transformation matrix in order to # encode it into CenterSize format. matmul_const = np.matrix('0.5 0 -1 0; 0 0.5 0 -1; 0.5 0 1 0; 0 0.5 0 1', dtype=np.float32) matmul_out = self.graph.matmul("FirstNMS/detection_boxes_conversion", div_out[0], matmul_const) @@ -523,7 +523,7 @@ def first_cnr(input): maxpool_node = [node for node in self.graph.nodes if node.op == "MaxPool" and "MaxPool2D/MaxPool" in node.name][0] maxpool_node.inputs[0] = cnr_output - # Return linear transformation node, it will be located between 1st and 2nd NMS, + # Return linear transformation node, it will be located between 1st and 2nd NMS, # so we need to pass and connect it to 2nd NMS. # In case you are converting Mask R-CNN, feature maps are required for 2nd CropAndResize. return matmul_out[0], relu_node.outputs[0] @@ -531,9 +531,9 @@ def first_cnr(input): def second_nms(background_class, score_activation, encoded_boxes, second_nms_threshold, nms_name=None): """ Updates the graph to replace the 2nd (or final) NMS op by EfficientNMS_TRT TensorRT plugin node. - :param background_class: Set EfficientNMS_TRT's background_class atribute. - :param score_activation: Set EfficientNMS_TRT's score_activation atribute. - :param encoded_boxes: The boxes to use as input. + :param background_class: Set EfficientNMS_TRT's background_class atribute. + :param score_activation: Set EfficientNMS_TRT's score_activation atribute. + :param encoded_boxes: The boxes to use as input. :param second_nms_threshold: Override the NMS score threshold. :param nms_name: Set the NMS node name. """ @@ -550,7 +550,7 @@ def second_nms(background_class, score_activation, encoded_boxes, second_nms_thr # Final Class Net tensor. 
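(Editorial aside: a quick NumPy check of the corner-to-CenterSize matrix used in first_cnr() above; the box values are made up.)

```python
import numpy as np

# One normalized box in (ymin, xmin, ymax, xmax) corner encoding.
corner_box = np.asarray([[0.2, 0.3, 0.6, 0.9]], dtype=np.float32)

# Same matrix as matmul_const in first_cnr(): right-multiplying converts
# corner encoding into CenterSize encoding.
corner_to_center = np.asarray(
    [[0.5, 0.0, -1.0, 0.0],
     [0.0, 0.5, 0.0, -1.0],
     [0.5, 0.0, 1.0, 0.0],
     [0.0, 0.5, 0.0, 1.0]], dtype=np.float32)

print(corner_box @ corner_to_center)
# [[0.4 0.6 0.4 0.6]]  ->  (y_center, x_center, height, width)
```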
second_class_net_tensor = slice_out[0] - + # Find the add node at the end of the box net (multi-scale localization predictor). second_box_net = self.find_head_end(second_head_names[1], "MatMul", "Add") # Final Box Net tensor. @@ -560,7 +560,7 @@ def second_nms(background_class, score_activation, encoded_boxes, second_nms_thr # Based on type of Crop and Resize operation, second_box_net_output can be of two types, example: # If use_matmul_crop_and_resize in pipeline.config is set to True, expect: [batch_size, first_stage_max_proposals, 4]. # Else use_matmul_crop_and_resize is either False or absent, expect: [batch_size, first_stage_max_proposals, num_classes, 4] - if self.matmul_crop_and_resize: + if self.matmul_crop_and_resize: reshape_shape_second = np.asarray([self.batch_size, self.first_stage_max_proposals, second_box_net.outputs[0].shape[1]], dtype=np.int64) else: reshape_shape_second = np.asarray([self.batch_size, self.first_stage_max_proposals, self.num_classes, second_box_net.outputs[0].shape[1]/self.num_classes], dtype=np.int64) @@ -574,13 +574,13 @@ def second_nms(background_class, score_activation, encoded_boxes, second_nms_thr # Create NMS node. nms_outputs = self.NMS(second_box_net_tensor, second_class_net_tensor, encoded_boxes, background_class, score_activation, self.second_stage_iou_threshold, self.second_stage_nms_score_threshold, second_nms_threshold, nms_name) - + return nms_outputs def second_cnr(feature_maps, second_nms_outputs): """ Updates the graph to replace the 2nd cropAndResize op by CropAndResize TensorRT plugin node. - :param input: Input tensor is the output from previous first_nms() step. + :param input: Input tensor is the output from previous first_nms() step. """ # Before passing 2nd NMS's detection boxes (rois) to second CropAndResize, we need to clip them. @@ -590,7 +590,7 @@ def second_cnr(feature_maps, second_nms_outputs): # Create Crop and Resize node. cnr_output = self.CropAndResize(clip_out, feature_maps, "second") - # Find MaxPool node that summarizes CropAndResize structure + # Find MaxPool node that summarizes CropAndResize structure maxpool_node = [node for node in self.graph.nodes if node.op == "MaxPool" and "MaxPool2D/MaxPool_1" in node.name][0] maxpool_node.inputs[0] = cnr_output @@ -616,7 +616,7 @@ def second_cnr(feature_maps, second_nms_outputs): final_reshape_node[0].name = "detection_masks" return final_reshape_node[0] - + # If you model is SSD, you need only one NMS and nothin else. if "ssd" in self.model: # Set graph outputs. 
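(Editorial aside: the two reshape targets described above for the 2nd Box Net output, written out as a small helper; the head sizes in the usage line are hypothetical.)

```python
import numpy as np

def second_box_reshape_shape(batch_size, num_proposals, num_classes, flat_box_dim, matmul_crop_and_resize):
    # use_matmul_crop_and_resize=True: the 2nd Box Net already emits one box per proposal.
    if matmul_crop_and_resize:
        return np.asarray([batch_size, num_proposals, flat_box_dim], dtype=np.int64)
    # Otherwise it emits one box per class, so split the flattened dimension.
    return np.asarray(
        [batch_size, num_proposals, num_classes, flat_box_dim // num_classes], dtype=np.int64
    )

# Hypothetical Faster R-CNN head: 90 classes, 300 proposals, flattened box dim of 360.
print(second_box_reshape_shape(1, 300, 90, 360, matmul_crop_and_resize=False))
# [  1 300  90   4]
```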
@@ -662,7 +662,7 @@ def main(args): parser.add_argument("-t1", "--first_nms_threshold", help="Override the score threshold for the 1st NMS operation", type=float) parser.add_argument("-t2", "--second_nms_threshold", help="Override the score threshold for the 2nd NMS operation", type=float) parser.add_argument("-d", "--debug", action='append', help="Add an extra output to debug a particular node") - parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"], + parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"], help="Set the input shape of the graph, as comma-separated dimensions in NCHW or NHWC format, default: NHWC") parser.add_argument("--tf2onnx", help="The path where to save the intermediate ONNX graph generated by tf2onnx, " "useful for debugging purposes, default: not saved", type=str) @@ -672,4 +672,3 @@ def main(args): print("\nThese arguments are required: --pipeline_config, --saved_model and --onnx") sys.exit(1) main(args) - diff --git a/samples/python/tensorflow_object_detection_api/eval_coco.py b/samples/python/tensorflow_object_detection_api/eval_coco.py index 5641ceb2..5086c660 100644 --- a/samples/python/tensorflow_object_detection_api/eval_coco.py +++ b/samples/python/tensorflow_object_detection_api/eval_coco.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +37,7 @@ def main(args): # Read annotations json as dictionary. with open(args.annotations) as f: data = json.load(f) - groundtruth = coco_tools.COCOWrapper(data, detection_type=args.detection_type) + groundtruth = coco_tools.COCOWrapper(data, detection_type=args.detection_type) detections_list = [] for batch, images, scales in batcher.get_batch(): print("Processing Image {} / {}".format(batcher.image_index, batcher.num_images), end="\r") @@ -59,7 +59,7 @@ def main(args): detections_list.append(coco_det) elif args.detection_type == 'segmentation': # Get detection bbox resolution. - det_width = round(det['xmax'] - det['xmin']) + det_width = round(det['xmax'] - det['xmin']) det_height = round(det['ymax'] - det['ymin']) # Create an image out of predicted mask array. small_mask = Image.fromarray(det['mask']) @@ -94,13 +94,13 @@ def main(args): parser = argparse.ArgumentParser() parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with.") parser.add_argument("-i", "--input", - help="The input to infer, either a single image path, or a directory of images.") + help="The input to infer, either a single image path, or a directory of images.") parser.add_argument("-d", "--detection_type", default="bbox", choices=["bbox", "segmentation"], help="Detection type for COCO, either bbox or if you are using Mask R-CNN's instance segmentation - segmentation.") parser.add_argument("-a", "--annotations", help="Set the json file to use for COCO instance annotations.") - parser.add_argument("-t", "--nms_threshold", type=float, + parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS operation, if higher than the threshold in the engine.") - parser.add_argument("--iou_threshold", default=0.5, type=float, + parser.add_argument("--iou_threshold", default=0.5, type=float, help="Select the IoU threshold for the mask segmentation. 
Range is 0 to 1. Pixel values more than threshold will become 1, less 0.") parser.add_argument("--preprocessor", default="fixed_shape_resizer", choices=["fixed_shape_resizer", "keep_aspect_ratio_resizer"], help="Select the image preprocessor to use based on your pipeline.config, either 'fixed_shape_resizer' or 'keep_aspect_ratio_resizer', default: fixed_shape_resizer.") diff --git a/samples/python/tensorflow_object_detection_api/image_batcher.py b/samples/python/tensorflow_object_detection_api/image_batcher.py index 47a3c2e0..c40e86c8 100644 --- a/samples/python/tensorflow_object_detection_api/image_batcher.py +++ b/samples/python/tensorflow_object_detection_api/image_batcher.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/tensorflow_object_detection_api/infer.py b/samples/python/tensorflow_object_detection_api/infer.py index ef0d9f8a..cdb1f2a9 100644 --- a/samples/python/tensorflow_object_detection_api/infer.py +++ b/samples/python/tensorflow_object_detection_api/infer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/tensorflow_object_detection_api/onnx_utils.py b/samples/python/tensorflow_object_detection_api/onnx_utils.py index a70ecb48..b539197a 100644 --- a/samples/python/tensorflow_object_detection_api/onnx_utils.py +++ b/samples/python/tensorflow_object_detection_api/onnx_utils.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -172,7 +172,7 @@ def find_node_by_op(self, op): @gs.Graph.register() def find_descendant_by_op(self, node, op, depth=10): """ - Starting from the given node, finds a node lower in the graph matching the given operation name. + Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an exhaustive graph search. In order to graph search bfs is used, so runtime complexity is O(V+E). :param self: The gs.Graph object being extended. @@ -198,7 +198,7 @@ def find_ancestor_by_op(self, node, op, depth=10): """ Starting from the given node, finds a node higher in the graph matching the given operation name. This is not an exhaustive graph search. - In order to graph search bfs is used, so runtime complexity is O(V+E). + In order to graph search bfs is used, so runtime complexity is O(V+E). :param self: The gs.Graph object being extended. :param node: The node to start searching from. :param op: The operation name to search for. 
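(Editorial aside: a standalone sketch of the breadth-first search that find_descendant_by_op() describes; the registered gs.Graph helper in onnx_utils.py differs in its exact bookkeeping.)

```python
from collections import deque

def find_descendant_by_op(node, op, depth=10):
    # BFS over "output tensor -> consumer node" edges, bounded by `depth`;
    # visiting each node and edge at most once gives O(V + E).
    queue, seen = deque([(node, 0)]), {id(node)}
    while queue:
        current, level = queue.popleft()
        if current.op == op:
            return current
        if level >= depth:
            continue
        for tensor in current.outputs:        # output tensors of this node
            for consumer in tensor.outputs:   # nodes that consume each tensor
                if id(consumer) not in seen:
                    seen.add(id(consumer))
                    queue.append((consumer, level + 1))
    return None
```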
diff --git a/samples/python/tensorflow_object_detection_api/visualize.py b/samples/python/tensorflow_object_detection_api/visualize.py index 283f06e1..ca992d0f 100644 --- a/samples/python/tensorflow_object_detection_api/visualize.py +++ b/samples/python/tensorflow_object_detection_api/visualize.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -142,4 +142,4 @@ def draw_text(draw, font, text, width, bar_height, offset, color): if output_path is None: return concat - concat.save(output_path) \ No newline at end of file + concat.save(output_path) diff --git a/samples/python/yolov3_onnx/data_processing.py b/samples/python/yolov3_onnx/data_processing.py index 06a3bc26..8a68145f 100644 --- a/samples/python/yolov3_onnx/data_processing.py +++ b/samples/python/yolov3_onnx/data_processing.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/yolov3_onnx/onnx_to_tensorrt.py b/samples/python/yolov3_onnx/onnx_to_tensorrt.py index 7ce53098..7572f76a 100644 --- a/samples/python/yolov3_onnx/onnx_to_tensorrt.py +++ b/samples/python/yolov3_onnx/onnx_to_tensorrt.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/python/yolov3_onnx/yolov3_to_onnx.py b/samples/python/yolov3_onnx/yolov3_to_onnx.py index 4259d55f..59f8b3a6 100644 --- a/samples/python/yolov3_onnx/yolov3_to_onnx.py +++ b/samples/python/yolov3_onnx/yolov3_to_onnx.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleAlgorithmSelector/CMakeLists.txt b/samples/sampleAlgorithmSelector/CMakeLists.txt index 4df908f2..8d31262a 100644 --- a/samples/sampleAlgorithmSelector/CMakeLists.txt +++ b/samples/sampleAlgorithmSelector/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleAlgorithmSelector/sampleAlgorithmSelector.cpp b/samples/sampleAlgorithmSelector/sampleAlgorithmSelector.cpp index 14d8b12f..927d5ed3 100644 --- a/samples/sampleAlgorithmSelector/sampleAlgorithmSelector.cpp +++ b/samples/sampleAlgorithmSelector/sampleAlgorithmSelector.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleCharRNN/CMakeLists.txt b/samples/sampleCharRNN/CMakeLists.txt index 656b9519..9c9b9f6c 100644 --- a/samples/sampleCharRNN/CMakeLists.txt +++ b/samples/sampleCharRNN/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleCharRNN/sampleCharRNN.cpp b/samples/sampleCharRNN/sampleCharRNN.cpp index 677c812e..919f0929 100644 --- a/samples/sampleCharRNN/sampleCharRNN.cpp +++ b/samples/sampleCharRNN/sampleCharRNN.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -207,7 +207,7 @@ class SampleCharRNNBase //! //! \brief Looks up the embedding tensor for a given char and copies it to input buffer //! - void copyEmbeddingToInput(samplesCommon::BufferManager& buffers, const char& c); + void copyEmbeddingToInput(samplesCommon::BufferManager& buffers, char const& c); //! //! \brief Perform one time step of inference with the TensorRT execution context @@ -917,14 +917,14 @@ bool SampleCharRNNBase::infer() //! //! \brief Looks up the embedding tensor for a given char and copies it to input buffer //! -void SampleCharRNNBase::copyEmbeddingToInput(samplesCommon::BufferManager& buffers, const char& c) +void SampleCharRNNBase::copyEmbeddingToInput(samplesCommon::BufferManager& buffers, char const& c) { auto embed = mWeightMap[mParams.weightNames.EMBED_NAME]; float* inputBuffer = static_cast(buffers.getHostBuffer(mParams.bindingNames.INPUT_BLOB_NAME)); auto index = mParams.charMaps.charToID.at(c); + auto bufSize = buffers.size(mParams.bindingNames.INPUT_BLOB_NAME); - std::memcpy(inputBuffer, static_cast(embed.values) + index * mParams.dataSize, - buffers.size(mParams.bindingNames.INPUT_BLOB_NAME)); + std::memcpy(inputBuffer, static_cast(embed.values) + index * mParams.dataSize, bufSize); } //! diff --git a/samples/sampleDynamicReshape/CMakeLists.txt b/samples/sampleDynamicReshape/CMakeLists.txt index 0fa80a84..374b5566 100644 --- a/samples/sampleDynamicReshape/CMakeLists.txt +++ b/samples/sampleDynamicReshape/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleDynamicReshape/sampleDynamicReshape.cpp b/samples/sampleDynamicReshape/sampleDynamicReshape.cpp index 8ba21f8d..003bfb55 100644 --- a/samples/sampleDynamicReshape/sampleDynamicReshape.cpp +++ b/samples/sampleDynamicReshape/sampleDynamicReshape.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleINT8API/CMakeLists.txt b/samples/sampleINT8API/CMakeLists.txt index 138b3484..e8eed5c3 100644 --- a/samples/sampleINT8API/CMakeLists.txt +++ b/samples/sampleINT8API/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleINT8API/sampleINT8API.cpp b/samples/sampleINT8API/sampleINT8API.cpp index e4eeaefb..9c421b6a 100644 --- a/samples/sampleINT8API/sampleINT8API.cpp +++ b/samples/sampleINT8API/sampleINT8API.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleIOFormats/CMakeLists.txt b/samples/sampleIOFormats/CMakeLists.txt index 98c8f0e5..154b599e 100755 --- a/samples/sampleIOFormats/CMakeLists.txt +++ b/samples/sampleIOFormats/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleIOFormats/sampleIOFormats.cpp b/samples/sampleIOFormats/sampleIOFormats.cpp index d7d364da..a8f94717 100644 --- a/samples/sampleIOFormats/sampleIOFormats.cpp +++ b/samples/sampleIOFormats/sampleIOFormats.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleNamedDimensions/CMakeLists.txt b/samples/sampleNamedDimensions/CMakeLists.txt index f5d22c05..f03d19b1 100644 --- a/samples/sampleNamedDimensions/CMakeLists.txt +++ b/samples/sampleNamedDimensions/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleNamedDimensions/create_model.py b/samples/sampleNamedDimensions/create_model.py index 610d9b9b..e4146aa5 100644 --- a/samples/sampleNamedDimensions/create_model.py +++ b/samples/sampleNamedDimensions/create_model.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,7 +29,7 @@ def main(): graph = gs.Graph(nodes=[node], inputs=[input0, input1], outputs=[output]) model = gs.export_onnx(graph) - onnx.save(model, "concat_layer.onnx") + onnx.save(model, "concat_layer.onnx") if __name__ == '__main__': main() diff --git a/samples/sampleNamedDimensions/sampleNamedDimensions.cpp b/samples/sampleNamedDimensions/sampleNamedDimensions.cpp index 1d09094e..c38d2bfb 100644 --- a/samples/sampleNamedDimensions/sampleNamedDimensions.cpp +++ b/samples/sampleNamedDimensions/sampleNamedDimensions.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleOnnxMNIST/CMakeLists.txt b/samples/sampleOnnxMNIST/CMakeLists.txt index b25a83a8..23bd886b 100644 --- a/samples/sampleOnnxMNIST/CMakeLists.txt +++ b/samples/sampleOnnxMNIST/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleOnnxMNIST/README.md b/samples/sampleOnnxMNIST/README.md index ec4fb594..05abeafd 100644 --- a/samples/sampleOnnxMNIST/README.md +++ b/samples/sampleOnnxMNIST/README.md @@ -56,7 +56,7 @@ To build the engine, create the builder and pass a logger created for TensorRT w `IBuilder* builder = createInferBuilder(sample::gLogger);` To build the engine from the generated TensorRT network, issue the following call: -`nvinfer1::ICudaEngine* engine = builder->buildCudaEngine(*network);` +`SampleUniquePtr plan{builder->buildSerializedNetwork(*network, *config)};` After you build the engine, verify that the engine is running properly by confirming the output is what you expected. The output format of this sample should be the same as the output of sampleMNIST. @@ -91,6 +91,9 @@ The Scale layer implements a per-tensor, per-channel, or per-element affine tran [Shuffle layer](https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#shuffle-layer) The Shuffle layer implements a reshape and transpose operator for tensors. +## Preparing sample data + +Download the sample data from the [TensorRT release tarball](https://developer.nvidia.com/nvidia-tensorrt-download#). 
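(Editorial aside: the sampleOnnxMNIST README hunk above replaces the removed buildCudaEngine call with buildSerializedNetwork. For readers working in Python, a rough equivalent with the TensorRT Python bindings is sketched below; the ONNX path and builder flags are illustrative.)

```python
import tensorrt as trt

def build_engine_from_onnx(onnx_path):
    # Rough Python-API counterpart of the C++ flow shown in the README:
    # parse the ONNX model, build a serialized plan, then deserialize it into an engine.
    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)
    with open(onnx_path, "rb") as f:
        if not parser.parse(f.read()):
            raise RuntimeError(parser.get_error(0))
    config = builder.create_builder_config()
    plan = builder.build_serialized_network(network, config)  # replaces the old buildCudaEngine path
    return trt.Runtime(logger).deserialize_cuda_engine(plan)
```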
## Running the sample diff --git a/samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp b/samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp index 2aea25c2..9fb5b678 100644 --- a/samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp +++ b/samples/sampleOnnxMNIST/sampleOnnxMNIST.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleOnnxMnistCoordConvAC/CMakeLists.txt b/samples/sampleOnnxMnistCoordConvAC/CMakeLists.txt index e89d5282..b094cf08 100644 --- a/samples/sampleOnnxMnistCoordConvAC/CMakeLists.txt +++ b/samples/sampleOnnxMnistCoordConvAC/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleOnnxMnistCoordConvAC/coord_conv.py b/samples/sampleOnnxMnistCoordConvAC/coord_conv.py index b5749b25..b2572ad5 100644 --- a/samples/sampleOnnxMnistCoordConvAC/coord_conv.py +++ b/samples/sampleOnnxMnistCoordConvAC/coord_conv.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/sampleOnnxMnistCoordConvAC/mnist_coord_conv_train.py b/samples/sampleOnnxMnistCoordConvAC/mnist_coord_conv_train.py index 29ce9c17..8d0a9623 100644 --- a/samples/sampleOnnxMnistCoordConvAC/mnist_coord_conv_train.py +++ b/samples/sampleOnnxMnistCoordConvAC/mnist_coord_conv_train.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,14 +28,14 @@ class Net(nn.Module): - """ - Original implementation of Convnet from + """ + Original implementation of Convnet from PyTorch repo https://github.com/pytorch/examples/tree/master/mnist but with CoordConv2d layers instead of Conv layers """ def __init__(self): super(Net, self).__init__() - # Regular Conv layer replaced with CoordConv2d layer + # Regular Conv layer replaced with CoordConv2d layer self.conv1 = CoordConv2d(1, 32, 3, 1) # Regular Conv layer replaced with CoordConv2d layer self.conv2 = CoordConv2d(32, 64, 3, 1) @@ -138,8 +138,8 @@ def main(): transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=args.test_batch_size, shuffle=True, **kwargs) - - + + model = Net().to(device) print(model) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) diff --git a/samples/sampleOnnxMnistCoordConvAC/modify_onnx_ac.py b/samples/sampleOnnxMnistCoordConvAC/modify_onnx_ac.py index ba0b1e9e..5462d6e0 100644 --- a/samples/sampleOnnxMnistCoordConvAC/modify_onnx_ac.py +++ b/samples/sampleOnnxMnistCoordConvAC/modify_onnx_ac.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,7 +50,7 @@ def main(): parser.add_argument('--output', default="mnist_with_coordconv.onnx", help='input batch size for testing (default: output.onnx)') args = parser.parse_args() - + # Load ONNX file graph = gs.import_onnx(onnx.load(args.onnx)) diff --git a/samples/sampleOnnxMnistCoordConvAC/sampleOnnxMnistCoordConvAC.cpp b/samples/sampleOnnxMnistCoordConvAC/sampleOnnxMnistCoordConvAC.cpp index 0beeb2e7..e161e4f8 100644 --- a/samples/sampleOnnxMnistCoordConvAC/sampleOnnxMnistCoordConvAC.cpp +++ b/samples/sampleOnnxMnistCoordConvAC/sampleOnnxMnistCoordConvAC.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/trtexec/CMakeLists.txt b/samples/trtexec/CMakeLists.txt index 15eadf26..35ad26de 100644 --- a/samples/trtexec/CMakeLists.txt +++ b/samples/trtexec/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/trtexec/prn_utils.py b/samples/trtexec/prn_utils.py index d71e0cf7..6d759238 100755 --- a/samples/trtexec/prn_utils.py +++ b/samples/trtexec/prn_utils.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/trtexec/profiler.py b/samples/trtexec/profiler.py index 66e9ebc5..e251254b 100755 --- a/samples/trtexec/profiler.py +++ b/samples/trtexec/profiler.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/trtexec/tracer.py b/samples/trtexec/tracer.py index 815862bc..8a9b7a62 100755 --- a/samples/trtexec/tracer.py +++ b/samples/trtexec/tracer.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/trtexec/trtexec.cpp b/samples/trtexec/trtexec.cpp index 0c54aff6..3113ff81 100644 --- a/samples/trtexec/trtexec.cpp +++ b/samples/trtexec/trtexec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -116,6 +116,12 @@ bool initLibrary(LibraryPtr& libPtr, std::string const& libName, FetchPtrs fetch libPtr.reset(new DynamicLibrary{libName}); fetchFunc(libPtr.get()); } + catch (std::exception const& e) + { + libPtr.reset(); + sample::gLogError << "Could not load library " << libName << ": " << e.what() << std::endl; + return false; + } catch (...) { libPtr.reset(); diff --git a/scripts/copyright-scan.py b/scripts/copyright-scan.py index 8a52ff76..cc51c8e1 100644 --- a/scripts/copyright-scan.py +++ b/scripts/copyright-scan.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/stubify.sh b/scripts/stubify.sh index ba6c300c..a6839b78 100755 --- a/scripts/stubify.sh +++ b/scripts/stubify.sh @@ -1,12 +1,13 @@ #!/bin/bash # -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 + +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - # This short shell script will extract all the strong "text" symbols from the # shared library and create a new "stub" shared library with the same symbols. # The body of these functions will be empty and therefore have no dependencies. @@ -40,15 +40,22 @@ fi SONAME=$(readelf -d "${IN_LIBFILE}" | grep '(SONAME)' | cut -d [ -f 2 | cut -d ] -f 1) +OS=$(lsb_release -si)-$(lsb_release -sr | cut -d '.' -f 1-2) + +if [ "$OS" = "Ubuntu-22.04" ] ; then + EXTRA_NM_FLAG="--without-symbol-versions" +fi + # make stub library if [ -z "${CC_ARGS}" ] ; then nm -D "${IN_LIBFILE}" ${EXTRA_NM_FLAG} | \ awk '{if ($2 == "T") { print "void",$3,"() {}" }}' | \ - "${CC}" -c -x c -Og -fPIC -shared -Wl,-soname=${SONAME} -Wl,--strip-all -o "${OUT_LIBFILE}" - + "${CC}" -x c -Og -fPIC -shared -Wl,-soname=${SONAME} -Wl,--strip-all -o "${OUT_LIBFILE}" - else nm -D "${IN_LIBFILE}" ${EXTRA_NM_FLAG} | \ awk '{if ($2 == "T") { print "void",$3,"() {}" }}' | \ - "${CC}" -c -x c -Og -fPIC -shared -Wl,-soname=${SONAME} -Wl,--strip-all -o "${OUT_LIBFILE}" "${CC_ARGS}" - + "${CC}" -x c -Og -fPIC -shared -Wl,-soname=${SONAME} -Wl,--strip-all -o "${OUT_LIBFILE}" "${CC_ARGS}" - fi exit $? + diff --git a/third_party/ieee/half.h b/third_party/ieee/half.h index 017834e9..c1f20f16 100644 --- a/third_party/ieee/half.h +++ b/third_party/ieee/half.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/Makefile b/tools/onnx-graphsurgeon/Makefile index 38d847fb..5b0cf62d 100644 --- a/tools/onnx-graphsurgeon/Makefile +++ b/tools/onnx-graphsurgeon/Makefile @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/docs/conf.py b/tools/onnx-graphsurgeon/docs/conf.py index 575e2e3c..34433481 100644 --- a/tools/onnx-graphsurgeon/docs/conf.py +++ b/tools/onnx-graphsurgeon/docs/conf.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/01_creating_a_model/example.py b/tools/onnx-graphsurgeon/examples/01_creating_a_model/example.py index 5fe4cb58..f4e0d80e 100644 --- a/tools/onnx-graphsurgeon/examples/01_creating_a_model/example.py +++ b/tools/onnx-graphsurgeon/examples/01_creating_a_model/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/02_creating_a_model_with_initializer/example.py b/tools/onnx-graphsurgeon/examples/02_creating_a_model_with_initializer/example.py index 0ab2dfed..c0268236 100644 --- a/tools/onnx-graphsurgeon/examples/02_creating_a_model_with_initializer/example.py +++ b/tools/onnx-graphsurgeon/examples/02_creating_a_model_with_initializer/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/generate.py b/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/generate.py index 82ae2ab3..e71a783b 100644 --- a/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/generate.py +++ b/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/isolate.py b/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/isolate.py index 15a9be95..2d4bfdf5 100644 --- a/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/isolate.py +++ b/tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/isolate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/04_modifying_a_model/generate.py b/tools/onnx-graphsurgeon/examples/04_modifying_a_model/generate.py index 82ae2ab3..e71a783b 100644 --- a/tools/onnx-graphsurgeon/examples/04_modifying_a_model/generate.py +++ b/tools/onnx-graphsurgeon/examples/04_modifying_a_model/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/04_modifying_a_model/modify.py b/tools/onnx-graphsurgeon/examples/04_modifying_a_model/modify.py index 5dba5271..55d42234 100644 --- a/tools/onnx-graphsurgeon/examples/04_modifying_a_model/modify.py +++ b/tools/onnx-graphsurgeon/examples/04_modifying_a_model/modify.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/05_folding_constants/README.md b/tools/onnx-graphsurgeon/examples/05_folding_constants/README.md index 905b0e51..7b860f68 100644 --- a/tools/onnx-graphsurgeon/examples/05_folding_constants/README.md +++ b/tools/onnx-graphsurgeon/examples/05_folding_constants/README.md @@ -2,7 +2,7 @@ ## Introduction -This example first generates a a model with several operations that can be evaluated +This example first generates a model with several operations that can be evaluated prior to inference time, then folds these operations and exports a new model. Constant folding involves pre-computing expressions that do not depend on runtime diff --git a/tools/onnx-graphsurgeon/examples/05_folding_constants/fold.py b/tools/onnx-graphsurgeon/examples/05_folding_constants/fold.py index e2378876..4514f808 100644 --- a/tools/onnx-graphsurgeon/examples/05_folding_constants/fold.py +++ b/tools/onnx-graphsurgeon/examples/05_folding_constants/fold.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/05_folding_constants/generate.py b/tools/onnx-graphsurgeon/examples/05_folding_constants/generate.py index 12c61080..98c6bc43 100644 --- a/tools/onnx-graphsurgeon/examples/05_folding_constants/generate.py +++ b/tools/onnx-graphsurgeon/examples/05_folding_constants/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/06_removing_nodes/generate.py b/tools/onnx-graphsurgeon/examples/06_removing_nodes/generate.py index c1bd733a..e786f894 100644 --- a/tools/onnx-graphsurgeon/examples/06_removing_nodes/generate.py +++ b/tools/onnx-graphsurgeon/examples/06_removing_nodes/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/06_removing_nodes/remove.py b/tools/onnx-graphsurgeon/examples/06_removing_nodes/remove.py index c4823fe0..01a6d683 100644 --- a/tools/onnx-graphsurgeon/examples/06_removing_nodes/remove.py +++ b/tools/onnx-graphsurgeon/examples/06_removing_nodes/remove.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/07_creating_a_model_with_the_layer_api/generate.py b/tools/onnx-graphsurgeon/examples/07_creating_a_model_with_the_layer_api/generate.py index 627d6682..516a9e33 100644 --- a/tools/onnx-graphsurgeon/examples/07_creating_a_model_with_the_layer_api/generate.py +++ b/tools/onnx-graphsurgeon/examples/07_creating_a_model_with_the_layer_api/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/generate.py b/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/generate.py index 1f02b0e0..680ad607 100644 --- a/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/generate.py +++ b/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/generate.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/replace.py b/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/replace.py index 93006cb7..88652309 100644 --- a/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/replace.py +++ b/tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/replace.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/09_shape_operations_with_the_layer_api/generate.py b/tools/onnx-graphsurgeon/examples/09_shape_operations_with_the_layer_api/generate.py index 33cbba4c..29403b08 100644 --- a/tools/onnx-graphsurgeon/examples/09_shape_operations_with_the_layer_api/generate.py +++ b/tools/onnx-graphsurgeon/examples/09_shape_operations_with_the_layer_api/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/generate.py b/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/generate.py index 679af4a2..19435b9c 100644 --- a/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/generate.py +++ b/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/generate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/modify.py b/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/modify.py index b6bb64b5..959a36b2 100644 --- a/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/modify.py +++ b/tools/onnx-graphsurgeon/examples/10_dynamic_batch_size/modify.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/base_exporter.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/base_exporter.py index 0673a1d3..b596e5d1 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/base_exporter.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/base_exporter.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/onnx_exporter.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/onnx_exporter.py index 057cc5de..4d4b2429 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/onnx_exporter.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/onnx_exporter.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/base_importer.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/base_importer.py index 6d853951..153d076e 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/base_importer.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/base_importer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/onnx_importer.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/onnx_importer.py index 23d429c2..de5cf73f 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/onnx_importer.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/onnx_importer.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py index f785a553..ce71d82b 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/node.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/node.py index f4d652f9..bf10f3f6 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/node.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/node.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/tensor.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/tensor.py index 7286979e..eec11405 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/tensor.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/tensor.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/logger.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/logger.py index 248d9188..7aac34d0 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/logger.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/logger.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/exception.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/exception.py index ce27f3cb..79843c9c 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/exception.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/exception.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/misc.py b/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/misc.py index d66675cc..cc46c459 100644 --- a/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/misc.py +++ b/tools/onnx-graphsurgeon/onnx_graphsurgeon/util/misc.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/setup.py b/tools/onnx-graphsurgeon/setup.py index 984fb55e..6982e8e0 100644 --- a/tools/onnx-graphsurgeon/setup.py +++ b/tools/onnx-graphsurgeon/setup.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/ir/test_graph.py b/tools/onnx-graphsurgeon/tests/ir/test_graph.py index 7fa51ef6..5a082d98 100644 --- a/tools/onnx-graphsurgeon/tests/ir/test_graph.py +++ b/tools/onnx-graphsurgeon/tests/ir/test_graph.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/onnx_models.py b/tools/onnx-graphsurgeon/tests/onnx_models.py index 5a5ec875..fe3b6a93 100644 --- a/tools/onnx-graphsurgeon/tests/onnx_models.py +++ b/tools/onnx-graphsurgeon/tests/onnx_models.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_api.py b/tools/onnx-graphsurgeon/tests/test_api.py index adceb603..2e3ac861 100644 --- a/tools/onnx-graphsurgeon/tests/test_api.py +++ b/tools/onnx-graphsurgeon/tests/test_api.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_examples.py b/tools/onnx-graphsurgeon/tests/test_examples.py index a89266a8..a7f29ef8 100644 --- a/tools/onnx-graphsurgeon/tests/test_examples.py +++ b/tools/onnx-graphsurgeon/tests/test_examples.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_exporters.py b/tools/onnx-graphsurgeon/tests/test_exporters.py index 8f5a607d..7d069fe9 100644 --- a/tools/onnx-graphsurgeon/tests/test_exporters.py +++ b/tools/onnx-graphsurgeon/tests/test_exporters.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_importers.py b/tools/onnx-graphsurgeon/tests/test_importers.py index 604d65b6..30570cb6 100644 --- a/tools/onnx-graphsurgeon/tests/test_importers.py +++ b/tools/onnx-graphsurgeon/tests/test_importers.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_ir.py b/tools/onnx-graphsurgeon/tests/test_ir.py index d2b90a4f..6f7be80e 100644 --- a/tools/onnx-graphsurgeon/tests/test_ir.py +++ b/tools/onnx-graphsurgeon/tests/test_ir.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/onnx-graphsurgeon/tests/test_util.py b/tools/onnx-graphsurgeon/tests/test_util.py index d1c7fa53..72023153 100644 --- a/tools/onnx-graphsurgeon/tests/test_util.py +++ b/tools/onnx-graphsurgeon/tests/test_util.py @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License");