From 451ae6ccd8b9de2f7b63913c662fe5317db55d77 Mon Sep 17 00:00:00 2001 From: Winkelmann Date: Mon, 18 Feb 2019 15:03:04 -0800 Subject: [PATCH] Initial release for OpenDistro Elasticsearch Alerting --- .editorConfig | 4 + .github/PULL_REQUEST_TEMPLATE.md | 6 + .gitignore | 8 + CODE_OF_CONDUCT.md | 4 + CONTRIBUTING.md | 62 + LICENSE.txt | 202 +++ NOTICE | 2 + README.md | 113 ++ THIRD-PARTY | 1199 +++++++++++++++++ alerting/.idea/codeStyles/codeStyleConfig.xml | 5 + alerting/build.gradle | 130 ++ .../alerting/AlertingPlugin.kt | 182 +++ .../alerting/MonitorRunner.kt | 430 ++++++ .../alerting/alerts/AlertError.kt | 61 + .../alerting/alerts/AlertIndices.kt | 200 +++ .../alerting/alerts/AlertMover.kt | 166 +++ .../alerting/model/Alert.kt | 177 +++ .../alerting/model/Monitor.kt | 179 +++ .../alerting/model/MonitorRunResult.kt | 135 ++ .../alerting/model/Trigger.kt | 105 ++ .../alerting/model/action/Action.kt | 95 ++ .../alerting/model/destination/Chime.kt | 77 ++ .../model/destination/CustomWebhook.kt | 109 ++ .../alerting/model/destination/Destination.kt | 186 +++ .../alerting/model/destination/SNS.kt | 71 + .../alerting/model/destination/Slack.kt | 77 ++ .../resthandler/AsyncActionHandler.kt | 27 + .../resthandler/RestAcknowledgeAlertAction.kt | 217 +++ .../RestDeleteDestinationAction.kt | 57 + .../resthandler/RestDeleteMonitorAction.kt | 60 + .../resthandler/RestExecuteMonitorAction.kt | 110 ++ .../resthandler/RestGetMonitorAction.kt | 96 ++ .../resthandler/RestIndexDestinationAction.kt | 190 +++ .../resthandler/RestIndexMonitorAction.kt | 229 ++++ .../resthandler/RestSearchMonitorAction.kt | 94 ++ .../script/TriggerExecutionContext.kt | 51 + .../alerting/script/TriggerScript.kt | 55 + .../alerting/settings/AlertingSettings.kt | 81 ++ .../alerting/util/DestinationType.kt | 23 + .../alerting/util/RestHandlerUtils.kt | 39 + .../plugin-metadata/plugin-security.policy | 8 + alerting/src/main/resources/DUMMY-FILE | 1 + ...asticsearch.painless.spi.PainlessExtension | 16 + 
.../alerting/alerts/alert_mapping.json | 69 + ...on.opendistroforelasticsearch.alerting.txt | 47 + .../alerting/AlertingRestTestCase.kt | 339 +++++ .../alerting/MonitorRunnerIT.kt | 540 ++++++++ .../alerting/MonitorTests.kt | 57 + .../alerting/TestHelpers.kt | 96 ++ .../alerting/alerts/AlertIndicesIT.kt | 80 ++ .../alerting/model/AlertTests.kt | 45 + .../alerting/model/DestinationTests.kt | 77 ++ .../alerting/model/XContentTests.kt | 103 ++ .../resthandler/DestinationRestApiIT.kt | 157 +++ .../alerting/resthandler/MonitorRestApiIT.kt | 531 ++++++++ build-tools/esplugin-coverage.gradle | 98 ++ build-tools/merged-coverage.gradle | 58 + build-tools/repositories.gradle | 19 + build-tools/rpmbuild.gradle | 40 + build.gradle | 91 ++ core/build.gradle | 30 + .../alerting/core/JobRunner.kt | 27 + .../alerting/core/JobSweeper.kt | 465 +++++++ .../alerting/core/JobSweeperMetrics.kt | 39 + .../alerting/core/ScheduledJobIndices.kt | 72 + .../core/action/node/ScheduledJobStats.kt | 104 ++ .../action/node/ScheduledJobsStatsAction.kt | 34 + .../action/node/ScheduledJobsStatsRequest.kt | 59 + .../node/ScheduledJobsStatsRequestBuilder.kt | 29 + .../action/node/ScheduledJobsStatsResponse.kt | 81 ++ .../node/ScheduledJobsStatsTransportAction.kt | 154 +++ .../alerting/core/model/Input.kt | 39 + .../alerting/core/model/Schedule.kt | 311 +++++ .../alerting/core/model/ScheduledJob.kt | 122 ++ .../alerting/core/model/SearchInput.kt | 78 ++ .../RestScheduledJobStatsHandler.kt | 103 ++ .../alerting/core/schedule/JobScheduler.kt | 235 ++++ .../core/schedule/JobSchedulerMetrics.kt | 56 + .../core/settings/ScheduledJobSettings.kt | 57 + .../alerting/elasticapi/ElasticExtensions.kt | 93 ++ .../resources/mappings/scheduled-jobs.json | 216 +++ .../alerting/core/XContentTests.kt | 44 + .../alerting/core/model/MockScheduledJob.kt | 39 + .../alerting/core/model/ScheduleTest.kt | 340 +++++ .../alerting/core/model/XContentTestBase.kt | 41 + .../core/schedule/JobSchedulerTest.kt | 189 +++ 
.../alerting/core/schedule/MockJobRunner.kt | 41 + elastic-api/build.gradle | 55 + .../alerting/elasticapi/ElasticAPI65.kt | 67 + ...asticsearch.alerting.elasticapi.ElasticAPI | 16 + .../alerting/elasticapi/ElasticAPI.kt | 103 ++ .../alerting/test/ElasticTestAPI.kt | 60 + gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 54413 bytes gradle/wrapper/gradle-wrapper.properties | 21 + gradlew | 172 +++ gradlew.bat | 84 ++ notification/build.gradle | 26 + .../alerting/destination/Notification.java | 47 + .../client/DestinationHttpClient.java | 169 +++ .../client/DestinationHttpClientPool.java | 30 + .../factory/ChimeDestinationFactory.java | 61 + .../CustomWebhookDestinationFactory.java | 62 + .../factory/DestinationFactory.java | 32 + .../factory/DestinationFactoryProvider.java | 57 + .../factory/SlackDestinationFactory.java | 62 + .../destination/message/BaseMessage.java | 71 + .../destination/message/ChimeMessage.java | 80 ++ .../message/CustomWebhookMessage.java | 196 +++ .../destination/message/DestinationType.java | 23 + .../destination/message/SlackMessage.java | 90 ++ .../destination/response/BaseResponse.java | 34 + .../response/DestinationHttpResponse.java | 57 + .../destination/ChimeDestinationTest.java | 94 ++ .../destination/CustomWebhookMessageTest.java | 103 ++ .../destination/SlackDestinationTest.java | 97 ++ ...istro-elasticsearch-alerting.release-notes | 25 + settings.gradle | 24 + 117 files changed, 12902 insertions(+) create mode 100644 .editorConfig create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE.txt create mode 100644 NOTICE create mode 100644 README.md create mode 100644 THIRD-PARTY create mode 100644 alerting/.idea/codeStyles/codeStyleConfig.xml create mode 100644 alerting/build.gradle create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingPlugin.kt create mode 100644 
alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunner.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertError.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndices.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertMover.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Alert.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Monitor.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/MonitorRunResult.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Trigger.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/action/Action.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Chime.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/CustomWebhook.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Destination.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/SNS.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Slack.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/AsyncActionHandler.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestAcknowledgeAlertAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteDestinationAction.kt create mode 100644 
alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteMonitorAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestExecuteMonitorAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestGetMonitorAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestIndexDestinationAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestIndexMonitorAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestSearchMonitorAction.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/script/TriggerExecutionContext.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/script/TriggerScript.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/settings/AlertingSettings.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/util/DestinationType.kt create mode 100644 alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/util/RestHandlerUtils.kt create mode 100644 alerting/src/main/plugin-metadata/plugin-security.policy create mode 100644 alerting/src/main/resources/DUMMY-FILE create mode 100644 alerting/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension create mode 100644 alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/alerts/alert_mapping.json create mode 100644 alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/com.amazon.opendistroforelasticsearch.alerting.txt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingRestTestCase.kt create mode 100644 
alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunnerIT.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorTests.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/TestHelpers.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndicesIT.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/AlertTests.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/DestinationTests.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/XContentTests.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/DestinationRestApiIT.kt create mode 100644 alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/MonitorRestApiIT.kt create mode 100644 build-tools/esplugin-coverage.gradle create mode 100644 build-tools/merged-coverage.gradle create mode 100644 build-tools/repositories.gradle create mode 100644 build-tools/rpmbuild.gradle create mode 100644 build.gradle create mode 100644 core/build.gradle create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobRunner.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeper.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeperMetrics.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/ScheduledJobIndices.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobStats.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsAction.kt create mode 
100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequestBuilder.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Input.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Schedule.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduledJob.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/SearchInput.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobScheduler.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerMetrics.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/settings/ScheduledJobSettings.kt create mode 100644 core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticExtensions.kt create mode 100644 core/src/main/resources/mappings/scheduled-jobs.json create mode 100644 core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/XContentTests.kt create mode 100644 core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/MockScheduledJob.kt create mode 100644 
core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduleTest.kt create mode 100644 core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/XContentTestBase.kt create mode 100644 core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerTest.kt create mode 100644 core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/MockJobRunner.kt create mode 100644 elastic-api/build.gradle create mode 100644 elastic-api/es65/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI65.kt create mode 100644 elastic-api/es65/main/resources/META-INF/services/com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI create mode 100644 elastic-api/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI.kt create mode 100644 elastic-api/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/test/ElasticTestAPI.kt create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties create mode 100755 gradlew create mode 100644 gradlew.bat create mode 100644 notification/build.gradle create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/Notification.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/client/DestinationHttpClient.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/client/DestinationHttpClientPool.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/ChimeDestinationFactory.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/CustomWebhookDestinationFactory.java create mode 100644 
notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactory.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactoryProvider.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/SlackDestinationFactory.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/BaseMessage.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/ChimeMessage.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/CustomWebhookMessage.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/DestinationType.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/SlackMessage.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/BaseResponse.java create mode 100644 notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/DestinationHttpResponse.java create mode 100644 notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/ChimeDestinationTest.java create mode 100644 notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/CustomWebhookMessageTest.java create mode 100644 notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/SlackDestinationTest.java create mode 100644 opendistro-elasticsearch-alerting.release-notes create mode 100644 settings.gradle diff --git a/.editorConfig b/.editorConfig new file mode 100644 index 00000000..4bd0d22a --- /dev/null +++ b/.editorConfig @@ -0,0 +1,4 @@ +root=true 
+[*.{kt,kts}] +insert_final_newline=true +max_line_length=140 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..ab40d21d --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,6 @@ +*Issue #, if available:* + +*Description of changes:* + + +By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..91b20067 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +buildSrc/libs +.gradle/ +build/ +.idea/ +!.idea/codeStyles/codeStyleConfig.xml +.DS_Store +*.log +out/ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..5b627cfa --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..9e10c1d0 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,62 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. 
+ +When filing an issue, please check [existing open](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/issues), or [recently closed](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/issues?q=is%3Aissue+is%3Aclosed), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. 
As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/labels/help%20wanted) issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. + diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..fda07859 --- /dev/null +++ b/NOTICE @@ -0,0 +1,2 @@ +OpenDistroForElasticsearch +Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/README.md b/README.md new file mode 100644 index 00000000..4ba300aa --- /dev/null +++ b/README.md @@ -0,0 +1,113 @@ +# Open Distro for Elasticsearch Alerting + +The Open Distro for Elasticsearch alerting plugin lets you perform scheduled searches against your cluster and send notifications upon meeting the criteria of a trigger. + + +# Highlights + +Scheduled searches use [cron expressions](https://en.wikipedia.org/wiki/Cron) or intervals (e.g. every five minutes) and the Elasticsearch query DSL. + +To define trigger conditions, use the Painless scripting language or simple thresholds (e.g. count > 100). + +When trigger conditions are met, you can publish messages to the following destinations: + +* [Chime](https://aws.amazon.com/chime/) +* [Slack](https://slack.com/) +* Custom webhook + +Messages can be static strings, or you can use the [Mustache](https://mustache.github.io/mustache.5.html) templates to include contextual information. + + +# Technical documentation + +Please see our [technical documentation](https://opendistro.github.io/for-elasticsearch-docs/). + + +# Developer setup, build, and run steps + + +## Setup + +1. 
Check out this package from version control. +1. Launch IntelliJ IDEA, choose **Import Project**, and select the `settings.gradle` file in the root of this package. +1. To build from the command line, set `JAVA_HOME` to point to a JDK >=11 before running `./gradlew`. + + +## Build + +This package is organized into subprojects, most of which contribute JARs to the top-level plugin in the `alerting` subproject. + +All subprojects in this package use the [Gradle](https://docs.gradle.org/4.10.2/userguide/userguide.html) build system. Gradle comes with excellent documentation that should be your first stop when trying to figure out how to operate or modify the build. + +However, to build the `alerting` plugin subproject, we also use the Elastic build tools for Gradle. These tools are idiosyncratic and don't always follow the conventions and instructions for building regular Java code using Gradle. Not everything in `alerting` will work the way it's described in the Gradle documentation. If you encounter such a situation, the Elastic build tools [source code](https://github.com/elastic/elasticsearch/tree/master/buildSrc/src/main/groovy/org/elasticsearch/gradle) is your best bet for figuring out what's going on. + + +### Building from the command line + +1. `./gradlew release` builds and tests all subprojects. +2. `./gradlew :alerting:run` launches a single node cluster with the alerting plugin installed. +3. `./gradlew :alerting:integTest` launches a single node cluster with the alerting plugin installed and runs all integ tests. +4. `./gradlew :alerting:integTest --tests="**.test execute foo"` runs a single integ test class or method + (remember to quote the test method name if it contains spaces). + +When launching a cluster using one of the above commands, logs are placed in `alerting/build/cluster/run node0/elasticsearch-/logs`. Though the logs are teed to the console, in practice it's best to check the actual log file. 
+ + +### Debugging + +Sometimes it's useful to attach a debugger to either the Elasticsearch cluster or the integ tests to see what's going on. When running unit tests, hit **Debug** from the IDE's gutter to debug the tests. To debug code running in an actual server, run: + +``` +./gradlew :alerting:integTest --debug-jvm # to start a cluster and run integ tests +``` + +OR + +``` +./gradlew :alerting:run --debug-jvm # to just start a cluster that can be debugged +``` + +The Elasticsearch server JVM will launch suspended and wait for a debugger to attach to `localhost:8000` before starting the Elasticsearch server. + +To debug code running in an integ test (which exercises the server from a separate JVM), run: + +``` +./gradlew -Dtest.debug :alerting:integTest +``` + +The test runner JVM will start suspended and wait for a debugger to attach to `localhost:5005` before running the tests. + + +### Advanced: Launching multi-node clusters locally + +Sometimes you need to launch a cluster with more than one Elasticsearch server process. The `startMultiNodeXX` tasks can help. There are two ways to use them: + + +#### Option 1: Start and stop all nodes together + +If you need a multi-node cluster where all nodes are started together, use: + +``` +./gradlew -PnumNodes=2 startMultiNode ... # to launch 2 nodes + +``` + +Remember to manually kill the nodes when you're done. + + +#### Option 2: Nodes join and leave the cluster independently + +If you need a multi-node cluster where you'd like to be able to add and kill each node independently, use: + +``` +./gradlew startMultiNode1 +./gradlew startMultiNode2 +... and so on +``` + +Just like option 1, remember to manually kill the nodes when you're done. + + +# License + +This code is licensed under the [Apache License, Version 2.0](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/blob/master/LICENSE.txt). 
diff --git a/THIRD-PARTY b/THIRD-PARTY new file mode 100644 index 00000000..5b8e33a0 --- /dev/null +++ b/THIRD-PARTY @@ -0,0 +1,1199 @@ +** Apache Commons Codec 1.10; version 1.10 -- http://commons.apache.org/proper/commons-codec/ +** Apache Commons Logging; version 1.1.3 -- https://commons.apache.org/proper/commons-logging/ +** Apache-HttpComponents-HttpCore; version 4.4.5 -- https://hc.apache.org/httpcomponents-core-ga/ +** cron-utils; version 7.0.5 -- https://github.com/jmrozanec/cron-utils/tree/7.0.5 +** EasyMock; version 4.0.1 -- https://mvnrepository.com/artifact/org.easymock/easymock/4.0.1 +** elasticsearch 6.5.4; version 6.5.4 -- https://github.com/elastic/elasticsearch/tree/v6.5.4 +** Jackson Core; version 2.8.10 -- https://github.com/FasterXML/jackson-core/tree/jackson-core-2.8.10 +** Jackson Date Format; version 2.8.10 -- https://github.com/FasterXML/jackson-dataformats-binary +** joda-time; version 2.9.9 -- https://github.com/JodaOrg/joda-time/tree/v2.9.9 +** Kotlin 1.2.60; version 1.2.60 -- https://github.com/JetBrains/kotlin/tree/1.2.60 +** kotlin-test; version 1.2.60 -- https://github.com/JetBrains/kotlin/tree/1.2.60 +** kotlin-test-junit; version 1.2.60 -- https://github.com/JetBrains/kotlin/tree/1.2.60 +** Maven-org-jetbrains_annotations; version 13.0 -- http://www.jetbrains.org/display/IJOS/Home;jsessionid=1881AA3B9F1A3A8D98C0EDD69082DC85 +** PowerMock; version 1.7.4 -- http://www.powermock.org + +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION + +1. Definitions. + + + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this +document. + + + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ + + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + + + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + + + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and +configuration files. + + + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object +code, generated documentation, and conversions to other media +types. + + + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that is +included in or attached to the work (an example is provided in the Appendix +below). + + + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works +thereof. 
+ + + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. For the purposes +of this definition, "submitted" means any form of electronic, verbal, or +written communication sent to the Licensor or its representatives, including +but not limited to communication on electronic mailing lists, source code +control systems, and issue tracking systems that are managed by, or on behalf +of, the Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise designated in +writing by the copyright owner as "Not a Contribution." + + + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy +of this License; and + +(b) You must cause any modified files to carry prominent notices stating that +You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from the +Source form of the Work, excluding those notices that do not pertain to any +part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then +any Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE 
text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise +complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
END OF TERMS AND +CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification +within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software + +distributed under the License is distributed on an "AS IS" BASIS, + +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. + +* For Apache Commons Codec 1.10 see also this required NOTICE: +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been +translated +from the original php source code available at +http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. 
+* For Apache Commons Logging see also this required NOTICE: +Apache Commons Logging +Copyright 2003-2007 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). +* For Apache-HttpComponents-HttpCore see also this required NOTICE: +Apache HttpComponents Core +Copyright 2005-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following +copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James +Murty. +- PKCS#1 PEM encoded private key parsing and utility functions from +oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +The licenses for these third party components are included in LICENSE.txt +* For cron-utils see also this required NOTICE: +no NOTICE file available +* For EasyMock see also this required NOTICE: +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this +document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. 
For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other +modifications +represent, as a whole, an original work of authorship. For the +purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces +of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright +owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. 
For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control +systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. 
If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. 
You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state +otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the +trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +* For elasticsearch 6.2.3 see also this required NOTICE: +Elasticsearch +Copyright 2009-2017 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). +* For elasticsearch 6.3.1 see also this required NOTICE: +Elasticsearch +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). +* For elasticsearch 6.4.2 see also this required NOTICE: +Elasticsearch +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). +* For elasticsearch 6.5.4 see also this required NOTICE: +Elasticsearch +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). +* For Jackson Core see also this required NOTICE: +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this +document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other +modifications +represent, as a whole, an original work of authorship. For the +purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces +of, +the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright +owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control +systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a 
NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state +otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the +trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +* For Jackson Date Format see also this required NOTICE: +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this +document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. 
For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other +modifications +represent, as a whole, an original work of authorship. For the +purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces +of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright +owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. 
For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control +systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. 
If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. 
You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state +otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the +trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+* For joda-time see also this required NOTICE:
+
+=============================================================================
+= NOTICE file corresponding to section 4d of the Apache License Version 2.0
+=
+
+=============================================================================
+This product includes software developed by
+Joda.org (http://www.joda.org/).
+* For Kotlin 1.2.6 see also this required NOTICE:
+
+=========================================================================
+== NOTICE file corresponding to the section 4 d of
+==
+== the Apache License, Version 2.0,
+==
+== in this case for the Kotlin Compiler distribution.
+==
+
+=========================================================================
+
+Kotlin Compiler
+Copyright 2010-2017 JetBrains s.r.o and respective authors and developers
+* For kotlin-test see also this required NOTICE:
+Copyright 2010-2018 JetBrains s.r.o.
+* For kotlin-test-junit see also this required NOTICE:
+Copyright 2010-2018 JetBrains s.r.o.
+* For Maven-org-jetbrains_annotations see also this required NOTICE:
+/*
+* Copyright 2000-2013 JetBrains s.r.o.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +* For PowerMock see also this required NOTICE: +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this +document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other +modifications +represent, as a whole, an original work of authorship. For the +purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces +of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright +owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control +systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. 
+ +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state +otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the +trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright 2007-2017 PowerMock Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/alerting/.idea/codeStyles/codeStyleConfig.xml b/alerting/.idea/codeStyles/codeStyleConfig.xml new file mode 100644 index 00000000..0f7bc519 --- /dev/null +++ b/alerting/.idea/codeStyles/codeStyleConfig.xml @@ -0,0 +1,5 @@ + + + + diff --git a/alerting/build.gradle b/alerting/build.gradle new file mode 100644 index 00000000..08ef8022 --- /dev/null +++ b/alerting/build.gradle @@ -0,0 +1,130 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+apply plugin: 'java'
+apply plugin: 'idea'
+apply plugin: 'org.jetbrains.kotlin.jvm'
+apply plugin: 'elasticsearch.esplugin'
+apply plugin: 'jacoco'
+apply from: '../build-tools/esplugin-coverage.gradle'
+
+ext {
+    projectSubstitutions = [:]
+    licenseFile = rootProject.file('LICENSE.txt')
+    noticeFile = rootProject.file('NOTICE')
+}
+
+if (isSnapshot) {
+    version += "-SNAPSHOT"
+}
+
+esplugin {
+    name 'opendistro_alerting'
+    description 'Amazon OpenDistro alerting plugin'
+    classname 'com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin'
+    extendedPlugins = ['lang-painless']
+}
+
+// Prefer elastic bundled versions for conflicts (primarily with AWS SDK). We need to specify these manually because
+// esplugin sets ResolutionStrategy.failOnVersionConflict. Luckily it is also nice enough to expose (most of) the
+// versions it uses in ext.versions.
+
+configurations.all {
+    if (it.state != Configuration.State.UNRESOLVED) return
+    resolutionStrategy {
+        force "joda-time:joda-time:${versions.joda}"
+        force "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
+        force "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
+        force "commons-logging:commons-logging:${versions.commonslogging}"
+        force "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+        force "commons-codec:commons-codec:${versions.commonscodec}"
+    }
+}
+
+dependencies {
+    compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}"
+
+    // Elasticsearch Nanny state
+    compile "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}"
+    compile "org.jetbrains.kotlin:kotlin-stdlib-common:${kotlin_version}"
+    compile "org.jetbrains:annotations:13.0"
+
+    compile project(":alerting-core")
+    compile project(":alerting-notification")
+    compile project(':alerting-elastic-api')
+
+    testCompile project(path: ':alerting-elastic-api', configuration: 'testOutput')
+}
+
+javadoc.enabled = false // turn off javadoc as it barfs on
Kotlin code +licenseHeaders.enabled = true +dependencyLicenses.enabled = false +thirdPartyAudit.enabled = false + +// See package README.md for details on using these tasks. +(1..3).each { i -> + def _numNodes = findProperty('numNodes') as Integer ?: 1 + tasks.create(name : "runMultiNode$i", type: org.elasticsearch.gradle.test.RunTask) { + daemonize = true + numNodes = _numNodes + setting 'node.name', "multi-node-$i" + setting 'http.port', '9200-9300' + setting 'transport.tcp.port', '9300-9400' + clusterName = 'multi-node-run' + plugin project.path + distribution = "oss-zip" + } + + tasks.create(name: "startMultiNode$i") { + if (_numNodes == 1) { + dependsOn "runMultiNode${i}#start" + } else { + (0..<_numNodes).each { n -> dependsOn "runMultiNode${i}#node${n}.start" } + } + } +} +task startMultiNode(dependsOn: startMultiNode1) + +def es_tmp_dir = rootProject.file('build/private/es_tmp').absoluteFile +es_tmp_dir.mkdirs() + +test { + systemProperty 'tests.security.manager', 'false' +} + +integTestRunner { + systemProperty 'tests.security.manager', 'false' + systemProperty 'java.io.tmpdir', es_tmp_dir.absolutePath + // Tell the test JVM if the cluster JVM is running under a debugger so that tests can use longer timeouts for + // requests. The 'doFirst' delays reading the debug setting on the cluster till execution time. 
+ doFirst { systemProperty 'cluster.debug', integTestCluster.debug } + + // The --debug-jvm command-line option makes the cluster debuggable; this makes the tests debuggable + if (System.getProperty("test.debug") != null) { + jvmArg '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:5005' + } +} + +integTestCluster { + distribution = "oss-zip" +} +run { + distribution = "oss-zip" +} + +afterEvaluate { + repositories.remove(repositories.findByName("MavenRepo")) +} + +apply from: '../build-tools/rpmbuild.gradle' diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingPlugin.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingPlugin.kt new file mode 100644 index 00000000..66aca096 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingPlugin.kt @@ -0,0 +1,182 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.core.JobSweeper +import com.amazon.opendistroforelasticsearch.alerting.core.ScheduledJobIndices +import com.amazon.opendistroforelasticsearch.alerting.core.action.node.ScheduledJobsStatsAction +import com.amazon.opendistroforelasticsearch.alerting.core.action.node.ScheduledJobsStatsTransportAction +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestAcknowledgeAlertAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestDeleteDestinationAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestDeleteMonitorAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestExecuteMonitorAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestGetMonitorAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestIndexDestinationAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestIndexMonitorAction +import com.amazon.opendistroforelasticsearch.alerting.resthandler.RestSearchMonitorAction +import com.amazon.opendistroforelasticsearch.alerting.script.TriggerScript +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.core.resthandler.RestScheduledJobStatsHandler +import com.amazon.opendistroforelasticsearch.alerting.core.schedule.JobScheduler +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings +import org.elasticsearch.action.ActionRequest +import org.elasticsearch.action.ActionResponse +import 
org.elasticsearch.client.Client +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver +import org.elasticsearch.cluster.node.DiscoveryNodes +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.io.stream.NamedWriteableRegistry +import org.elasticsearch.common.settings.ClusterSettings +import org.elasticsearch.common.settings.IndexScopedSettings +import org.elasticsearch.common.settings.Setting +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.settings.SettingsFilter +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.env.Environment +import org.elasticsearch.env.NodeEnvironment +import org.elasticsearch.index.IndexModule +import org.elasticsearch.painless.spi.PainlessExtension +import org.elasticsearch.painless.spi.Whitelist +import org.elasticsearch.painless.spi.WhitelistLoader +import org.elasticsearch.plugins.ActionPlugin +import org.elasticsearch.plugins.Plugin +import org.elasticsearch.plugins.ScriptPlugin +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestHandler +import org.elasticsearch.script.ScriptContext +import org.elasticsearch.script.ScriptService +import org.elasticsearch.threadpool.ExecutorBuilder +import org.elasticsearch.threadpool.ThreadPool +import org.elasticsearch.watcher.ResourceWatcherService +import java.util.function.Supplier +/** + * Entry point of the OpenDistro for Elasticsearch alerting plugin + * This class initializes the [RestGetMonitorAction], [RestDeleteMonitorAction], [RestIndexMonitorAction] rest handlers. + * It also adds [Monitor.XCONTENT_REGISTRY], [SearchInput.XCONTENT_REGISTRY] to the + * [NamedXContentRegistry] so that we are able to deserialize the custom named objects. 
+ */
+internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, Plugin() {
+    override fun getContextWhitelists(): Map<ScriptContext<*>, List<Whitelist>> {
+        val whitelist = WhitelistLoader.loadFromResourceFiles(javaClass, "com.amazon.opendistroforelasticsearch.alerting.txt")
+        return mapOf(TriggerScript.CONTEXT to listOf(whitelist))
+    }
+
+    companion object {
+        @JvmField val KIBANA_USER_AGENT = "Kibana"
+        @JvmField val UI_METADATA_EXCLUDE = arrayOf("monitor.${Monitor.UI_METADATA_FIELD}")
+        @JvmField val MONITOR_BASE_URI = "/_opendistro/_alerting/monitors"
+        @JvmField val DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations"
+        @JvmField val ALERTING_JOB_TYPES = listOf("monitor")
+    }
+
+    lateinit var runner: MonitorRunner
+    lateinit var scheduler: JobScheduler
+    lateinit var sweeper: JobSweeper
+    lateinit var scheduledJobIndices: ScheduledJobIndices
+    lateinit var threadPool: ThreadPool
+    lateinit var alertIndices: AlertIndices
+    lateinit var clusterService: ClusterService
+
+    override fun getRestHandlers(
+        settings: Settings,
+        restController: RestController,
+        clusterSettings: ClusterSettings,
+        indexScopedSettings: IndexScopedSettings,
+        settingsFilter: SettingsFilter,
+        indexNameExpressionResolver: IndexNameExpressionResolver?,
+        nodesInCluster: Supplier<DiscoveryNodes>
+    ): List<RestHandler> {
+        return listOf(RestGetMonitorAction(settings, restController),
+                RestDeleteMonitorAction(settings, restController),
+                RestIndexMonitorAction(settings, restController, scheduledJobIndices, clusterService),
+                RestSearchMonitorAction(settings, restController),
+                RestExecuteMonitorAction(settings, restController, runner),
+                RestAcknowledgeAlertAction(settings, restController),
+                RestScheduledJobStatsHandler(settings, restController, "_alerting"),
+                RestIndexDestinationAction(settings, restController, scheduledJobIndices, clusterService),
+                RestDeleteDestinationAction(settings, restController))
+    }
+
+    override fun getActions(): List<ActionPlugin.ActionHandler<out ActionRequest, out ActionResponse>> {
+        return
listOf(ActionPlugin.ActionHandler(ScheduledJobsStatsAction.INSTANCE, ScheduledJobsStatsTransportAction::class.java))
+    }
+
+    override fun getNamedXContent(): List<NamedXContentRegistry.Entry> {
+        return listOf(Monitor.XCONTENT_REGISTRY, SearchInput.XCONTENT_REGISTRY)
+    }
+
+    override fun createComponents(
+        client: Client,
+        clusterService: ClusterService,
+        threadPool: ThreadPool,
+        resourceWatcherService: ResourceWatcherService,
+        scriptService: ScriptService,
+        xContentRegistry: NamedXContentRegistry,
+        environment: Environment,
+        nodeEnvironment: NodeEnvironment,
+        namedWriteableRegistry: NamedWriteableRegistry
+    ): Collection<Any> {
+        // Need to figure out how to use the Elasticsearch DI classes rather than handwiring things here.
+        val settings = environment.settings()
+        alertIndices = AlertIndices(settings, client.admin().indices(), threadPool, clusterService)
+        runner = MonitorRunner(settings, client, threadPool, scriptService, xContentRegistry, alertIndices, clusterService)
+        scheduledJobIndices = ScheduledJobIndices(client.admin(), clusterService)
+        scheduler = JobScheduler(threadPool, runner)
+        sweeper = JobSweeper(environment.settings(), client, clusterService, threadPool, xContentRegistry, scheduler, ALERTING_JOB_TYPES)
+        this.threadPool = threadPool
+        this.clusterService = clusterService
+        return listOf(sweeper, scheduler, runner, scheduledJobIndices)
+    }
+
+    override fun getSettings(): List<Setting<*>> {
+        return listOf(
+                ScheduledJobSettings.REQUEST_TIMEOUT,
+                ScheduledJobSettings.SWEEP_BACKOFF_MILLIS,
+                ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT,
+                ScheduledJobSettings.SWEEP_PERIOD,
+                ScheduledJobSettings.SWEEP_PAGE_SIZE,
+                ScheduledJobSettings.SWEEPER_ENABLED,
+                AlertingSettings.INPUT_TIMEOUT,
+                AlertingSettings.INDEX_TIMEOUT,
+                AlertingSettings.BULK_TIMEOUT,
+                AlertingSettings.ALERT_BACKOFF_MILLIS,
+                AlertingSettings.ALERT_BACKOFF_COUNT,
+                AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS,
+                AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT,
+                AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD,
+
AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE,
+                AlertingSettings.ALERT_HISTORY_MAX_DOCS,
+                AlertingSettings.ALERTING_MAX_MONITORS,
+                AlertingSettings.REQUEST_TIMEOUT)
+    }
+
+    override fun onIndexModule(indexModule: IndexModule) {
+        if (indexModule.index.name == ScheduledJob.SCHEDULED_JOBS_INDEX) {
+            indexModule.addIndexOperationListener(sweeper)
+        }
+    }
+
+    override fun getContexts(): List<ScriptContext<*>> {
+        return listOf(TriggerScript.CONTEXT)
+    }
+
+    override fun getExecutorBuilders(settings: Settings): List<ExecutorBuilder<*>> {
+        return listOf(MonitorRunner.executorBuilder(settings))
+    }
+}
diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunner.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunner.kt
new file mode 100644
index 00000000..78760420
--- /dev/null
+++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunner.kt
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.core.JobRunner +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertError +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertMover +import com.amazon.opendistroforelasticsearch.alerting.model.ActionRunResult +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACKNOWLEDGED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACTIVE +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.COMPLETED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.DELETED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ERROR +import com.amazon.opendistroforelasticsearch.alerting.model.InputRunResults +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.model.MonitorRunResult +import com.amazon.opendistroforelasticsearch.alerting.model.Trigger +import com.amazon.opendistroforelasticsearch.alerting.model.TriggerRunResult +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action.Companion.MESSAGE +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action.Companion.MESSAGE_ID +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action.Companion.SUBJECT +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Destination +import com.amazon.opendistroforelasticsearch.alerting.script.TriggerExecutionContext +import com.amazon.opendistroforelasticsearch.alerting.script.TriggerScript +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import 
com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.BULK_TIMEOUT +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.INPUT_TIMEOUT +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOB_TYPE +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.convertToMap +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.firstFailureOrNull +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.retry +import org.elasticsearch.ExceptionsHelper +import org.elasticsearch.action.DocWriteRequest +import org.elasticsearch.action.bulk.BackoffPolicy +import org.elasticsearch.action.bulk.BulkItemResponse +import org.elasticsearch.action.bulk.BulkRequest +import org.elasticsearch.action.delete.DeleteRequest +import org.elasticsearch.action.get.GetRequest +import org.elasticsearch.action.index.IndexRequest +import org.elasticsearch.action.search.SearchRequest +import org.elasticsearch.client.Client +import org.elasticsearch.common.Strings +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.bytes.BytesReference +import org.elasticsearch.common.settings.Settings +import 
org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.util.concurrent.EsExecutors +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.script.Script +import org.elasticsearch.script.ScriptService +import org.elasticsearch.script.ScriptType +import org.elasticsearch.script.TemplateScript +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.threadpool.ScalingExecutorBuilder +import org.elasticsearch.threadpool.ThreadPool +import java.time.Instant + +class MonitorRunner( + settings: Settings, + private val client: Client, + private val threadPool: ThreadPool, + private val scriptService: ScriptService, + private val xContentRegistry: NamedXContentRegistry, + private val alertIndices: AlertIndices, + clusterService: ClusterService +) : JobRunner { + + private val logger = ElasticAPI.INSTANCE.getLogger(MonitorRunner::class.java, settings) + + @Volatile private var searchTimeout = INPUT_TIMEOUT.get(settings) + @Volatile private var bulkTimeout = BULK_TIMEOUT.get(settings) + @Volatile private var alertBackoffMillis = ALERT_BACKOFF_MILLIS.get(settings) + @Volatile private var alertBackoffCount = ALERT_BACKOFF_COUNT.get(settings) + @Volatile private var moveAlertsBackoffMillis = MOVE_ALERTS_BACKOFF_MILLIS.get(settings) + @Volatile private var moveAlertsBackoffCount = MOVE_ALERTS_BACKOFF_COUNT.get(settings) + @Volatile private var retryPolicy = BackoffPolicy.constantBackoff(alertBackoffMillis, alertBackoffCount) + @Volatile private var moveAlertsRetryPolicy = 
BackoffPolicy.exponentialBackoff(moveAlertsBackoffMillis, moveAlertsBackoffCount) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(INPUT_TIMEOUT) { searchTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(BULK_TIMEOUT) { bulkTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_MILLIS) { + retryPolicy = BackoffPolicy.constantBackoff(it, alertBackoffCount) + } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_COUNT) { + retryPolicy = BackoffPolicy.constantBackoff(alertBackoffMillis, it) + } + clusterService.clusterSettings.addSettingsUpdateConsumer(MOVE_ALERTS_BACKOFF_MILLIS) { + moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(it, moveAlertsBackoffCount) + } + clusterService.clusterSettings.addSettingsUpdateConsumer(MOVE_ALERTS_BACKOFF_COUNT) { + moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(alertBackoffMillis, it) + } + } + + companion object { + private const val THREAD_POOL_NAME = "opendistro_monitor_runner" + + fun executorBuilder(settings: Settings): ScalingExecutorBuilder { + val availableProcessors = EsExecutors.numberOfProcessors(settings) + // Use the same setting as ES GENERIC Executor builder. + val genericThreadPoolMax = Math.min(512, Math.max(128, 4 * availableProcessors)) + return ScalingExecutorBuilder(THREAD_POOL_NAME, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30L)) + } + } + + fun executor() = threadPool.executor(THREAD_POOL_NAME)!! 
+ + override fun postIndex(job: ScheduledJob) { + if (job is Monitor) { + executor().submit { + AlertMover(client, threadPool, this, alertIndices, moveAlertsRetryPolicy.iterator(), logger, job.id, job).run() + } + } else { + throw IllegalArgumentException("Invalid job type") + } + } + + override fun postDelete(jobId: String) { + executor().submit { + AlertMover(client, threadPool, this, alertIndices, moveAlertsRetryPolicy.iterator(), logger, jobId).run() + } + } + + override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) { + if (job is Monitor) { + executor().submit { runMonitor(job, periodStart, periodEnd) } + } else { + throw IllegalArgumentException("Invalid job type") + } + } + + fun runMonitor(monitor: Monitor, periodStart: Instant, periodEnd: Instant, dryrun: Boolean = false): MonitorRunResult { + if (periodStart == periodEnd) { + logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.") + } + + var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) + val currentAlerts = try { + alertIndices.createAlertIndex() + alertIndices.createInitialHistoryIndex() + loadCurrentAlerts(monitor) + } catch (e: Exception) { + // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts + val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id + logger.error("Error loading alerts for monitor: $id", e) + return monitorResult.copy(error = e) + } + + monitorResult = monitorResult.copy(inputResults = collectInputResults(monitor, periodStart, periodEnd)) + + val updatedAlerts = mutableListOf() + val triggerResults = mutableMapOf() + for (trigger in monitor.triggers) { + val currentAlert = currentAlerts[trigger] + val triggerCtx = TriggerExecutionContext(monitor, trigger, monitorResult, currentAlert) + val triggerResult = runTrigger(monitor, trigger, triggerCtx) + triggerResults[trigger.id] = triggerResult + + if (isTriggerActionable(triggerCtx, 
triggerResult)) { + val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error) + for (action in trigger.actions) { + triggerResult.actionResults[action.name] = runAction(action, actionCtx, dryrun) + } + } + + val updatedAlert = composeAlert(triggerCtx, triggerResult, monitorResult.alertError() ?: triggerResult.alertError()) + if (updatedAlert != null) updatedAlerts += updatedAlert + } + + // Don't save alerts if this is a test monitor + if (!dryrun && monitor.id != Monitor.NO_ID) { + saveAlerts(updatedAlerts) + } + return monitorResult.copy(triggerResults = triggerResults) + } + + fun rescheduleAlertMover(monitorId: String, monitor: Monitor?, backoff: Iterator) { + executor().submit { + AlertMover(client, threadPool, this, alertIndices, backoff, logger, monitorId, monitor).run() + } + } + + private fun currentTime() = Instant.ofEpochMilli(threadPool.absoluteTimeInMillis()) + + private fun composeAlert(ctx: TriggerExecutionContext, result: TriggerRunResult, alertError: AlertError?): Alert? 
{ + val currentTime = currentTime() + val currentAlert = ctx.alert + // Merge the alert's error message to the current alert's history + val updatedHistory = currentAlert?.errorHistory.update(alertError) + return if (alertError == null && !result.triggered) { + currentAlert?.copy(state = COMPLETED, endTime = currentTime, errorMessage = null, + errorHistory = updatedHistory) + } else if (alertError == null && currentAlert?.isAcknowledged() == true) { + null + } else if (currentAlert != null) { + val alertState = if (alertError == null) ACTIVE else ERROR + currentAlert.copy(state = alertState, lastNotificationTime = currentTime, errorMessage = alertError?.message, + errorHistory = updatedHistory) + } else { + val alertState = if (alertError == null) ACTIVE else ERROR + Alert(monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime, + lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message, + errorHistory = updatedHistory) + } + } + + private fun collectInputResults(monitor: Monitor, periodStart: Instant, periodEnd: Instant): InputRunResults { + return try { + val results = mutableListOf>() + monitor.inputs.forEach { input -> + when (input) { + is SearchInput -> { + // TODO: Figure out a way to use SearchTemplateRequest without bringing in the entire TransportClient + val searchParams = mapOf("period_start" to periodStart.toEpochMilli(), + "period_end" to periodEnd.toEpochMilli()) + val searchSource = scriptService.compile(Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, + input.query.toString(), searchParams), TemplateScript.CONTEXT) + .newInstance(searchParams) + .execute() + + val searchRequest = SearchRequest().indices(*input.indices.toTypedArray()) + ElasticAPI.INSTANCE.jsonParser(xContentRegistry, searchSource).use { + searchRequest.source(SearchSourceBuilder.fromXContent(it)) + } + results += client.search(searchRequest).actionGet(searchTimeout).convertToMap() + } + else -> { + throw 
IllegalArgumentException("Unsupported input type: ${input.name()}.") + } + } + } + InputRunResults(results.toList()) + } catch (e: Exception) { + logger.info("Error collecting inputs for monitor: ${monitor.id}", e) + InputRunResults(emptyList(), e) + } + } + + private fun runTrigger(monitor: Monitor, trigger: Trigger, ctx: TriggerExecutionContext): TriggerRunResult { + return try { + val triggered = scriptService.compile(trigger.condition, TriggerScript.CONTEXT) + .newInstance(trigger.condition.params) + .execute(ctx) + TriggerRunResult(trigger.name, triggered, null) + } catch (e: Exception) { + logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e) + // if the script fails we need to send an alert so set triggered = true + TriggerRunResult(trigger.name, true, e) + } + } + + private fun loadCurrentAlerts(monitor: Monitor): Map { + val request = SearchRequest(AlertIndices.ALERT_INDEX) + .routing(monitor.id) + .source(alertQuery(monitor)) + val response = client.search(request).actionGet(searchTimeout) + if (response.status() != RestStatus.OK) { + throw (response.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts")) + } + + val foundAlerts = response.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } + .groupBy { it.triggerId } + foundAlerts.values.forEach { alerts -> + if (alerts.size > 1) { + logger.warn("Found multiple alerts for same trigger: $alerts") + } + } + + return monitor.triggers.associate { trigger -> + trigger to (foundAlerts[trigger.id]?.firstOrNull()) + } + } + + private fun contentParser(bytesReference: BytesReference): XContentParser { + val xcp = ElasticAPI.INSTANCE.jsonParser(xContentRegistry, bytesReference) + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + return xcp + } + + private fun alertQuery(monitor: Monitor): SearchSourceBuilder { + return SearchSourceBuilder.searchSource() + .size(monitor.triggers.size * 2) 
// We expect there to be only a single in-progress alert so fetch 2 to check + .query(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id)) + } + + private fun saveAlerts(alerts: List) { + var requestsToRetry = alerts.flatMap { alert -> + // we don't want to set the version when saving alerts because the Runner has first priority when writing alerts. + // In the rare event that a user acknowledges an alert between when it's read and when it's written + // back we're ok if that acknowledgement is lost. It's easier to get the user to retry than for the runner to + // spend time reloading the alert and writing it back. + when (alert.state) { + ACTIVE, ERROR -> { + listOf>(IndexRequest(AlertIndices.ALERT_INDEX, AlertIndices.MAPPING_TYPE) + .routing(alert.monitorId) + .source(alert.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) + .id(if (alert.id != Alert.NO_ID) alert.id else null)) + } + ACKNOWLEDGED, DELETED -> { + throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert") + } + COMPLETED -> { + listOf>( + DeleteRequest(AlertIndices.ALERT_INDEX, AlertIndices.MAPPING_TYPE, alert.id) + .routing(alert.monitorId), + IndexRequest(AlertIndices.HISTORY_WRITE_INDEX, AlertIndices.MAPPING_TYPE) + .routing(alert.monitorId) + .source(alert.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) + .id(alert.id)) + } + } + } + + if (requestsToRetry.isEmpty()) return + var bulkRequest = BulkRequest().add(requestsToRetry) + val successfulResponses = mutableListOf() + var failedResponses = listOf() + retryPolicy.retry { // Handles 502, 503, 504 responses for the bulk request. + retryPolicy.iterator().forEach { delay -> // Handles partial failures + val responses = client.bulk(bulkRequest).actionGet(bulkTimeout).items ?: arrayOf() + successfulResponses += responses.filterNot { it.isFailed } + failedResponses = responses.filter { it.isFailed } + // retry only if this is a EsRejectedExecutionException (i.e. 
429 TOO MANY REQUESTs) + requestsToRetry = failedResponses + .filter { ExceptionsHelper.unwrapCause(it.failure.cause) is EsRejectedExecutionException } + .map { bulkRequest.requests()[it.itemId] as IndexRequest } + + bulkRequest = BulkRequest().add(requestsToRetry) + if (requestsToRetry.isEmpty()) { + return@retry + } else { + Thread.sleep(delay.millis) + } + } + } + + for (it in failedResponses) { + logger.error("Failed to write alert: ${it.id}", it.failure.cause) + } + } + + private fun isTriggerActionable(ctx: TriggerExecutionContext, result: TriggerRunResult): Boolean { + // Suppress actions if the current alert is acknowledged and there are no errors. + val suppress = ctx.alert?.state == ACKNOWLEDGED && result.error == null && ctx.error == null + return result.triggered && !suppress + } + + private fun runAction(action: Action, ctx: TriggerExecutionContext, dryrun: Boolean): ActionRunResult { + return try { + val actionOutput = mutableMapOf() + actionOutput[SUBJECT] = if (action.subjectTemplate != null) compileTemplate(action.subjectTemplate, ctx) else "" + actionOutput[MESSAGE] = compileTemplate(action.messageTemplate, ctx) + if (Strings.isNullOrEmpty(actionOutput[MESSAGE])) { + throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}") + } + if (!dryrun) { + var destination = getDestinationInfo(action.destinationId) + actionOutput[MESSAGE_ID] = destination.publish(actionOutput[SUBJECT], actionOutput[MESSAGE]!!) 
+ } + ActionRunResult(action.name, actionOutput, false, null) + } catch (e: Exception) { + ActionRunResult(action.name, mapOf(), false, e) + } + } + + private fun compileTemplate(template: Script, ctx: TriggerExecutionContext): String { + return scriptService.compile(template, TemplateScript.CONTEXT) + .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg())) + .execute() + } + + private fun getDestinationInfo(destinationId: String): Destination { + var destination: Destination + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE, destinationId).routing(destinationId) + val getResponse = client.get(getRequest).actionGet() + if (!getResponse.isExists || getResponse.isSourceEmpty) { + throw IllegalStateException("Destination document with id $destinationId not found or source is empty") + } + + val jobSource = getResponse.sourceAsBytesRef + val xcp = ElasticAPI.INSTANCE.jsonParser(xContentRegistry, jobSource) + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp::getTokenLocation) + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + destination = Destination.parse(xcp) + ensureExpectedToken(XContentParser.Token.END_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + return destination + } + + private fun List?.update(alertError: AlertError?): List { + return when { + this == null && alertError == null -> emptyList() + this != null && alertError == null -> this + this == null && alertError != null -> listOf(alertError) + this != null && alertError != null -> (listOf(alertError) + this).take(10) + else -> throw IllegalStateException("Unreachable code reached!") + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertError.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertError.kt new file 
mode 100644 index 00000000..b24a95f7 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertError.kt @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.alerts + +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.instant +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.Instant + +data class AlertError(val timestamp: Instant, val message: String) : ToXContent { + + companion object { + + const val TIMESTAMP_FIELD = "timestamp" + const val MESSAGE_FIELD = "message" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): AlertError { + + lateinit var timestamp: Instant + lateinit var message: String + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + TIMESTAMP_FIELD -> timestamp = requireNotNull(xcp.instant()) + MESSAGE_FIELD -> message = xcp.text() + } + 
} + return AlertError(timestamp = timestamp, message = message) + } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .optionalTimeField(TIMESTAMP_FIELD, timestamp) + .field(MESSAGE_FIELD, message) + .endObject() + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndices.kt new file mode 100644 index 00000000..d606021a --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndices.kt @@ -0,0 +1,200 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.alerts + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices.Companion.HISTORY_WRITE_INDEX +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_INDEX_MAX_AGE +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_MAX_DOCS +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ROLLOVER_PERIOD +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import org.elasticsearch.ResourceAlreadyExistsException +import org.elasticsearch.action.admin.indices.alias.Alias +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest +import org.elasticsearch.client.IndicesAdminClient +import org.elasticsearch.cluster.ClusterChangedEvent +import org.elasticsearch.cluster.ClusterStateListener +import org.elasticsearch.cluster.LocalNodeMasterListener +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.threadpool.Scheduler.Cancellable +import org.elasticsearch.threadpool.ThreadPool + +/** + * Class to manage the creation and rollover of alert indices and alert history indices. In progress alerts are stored + * in [ALERT_INDEX]. 
Completed alerts are written to [HISTORY_WRITE_INDEX] which is an alias that points at the + * current index to which completed alerts are written. [HISTORY_WRITE_INDEX] is periodically rolled over to a new + * date based index. The frequency of rolling over indices is controlled by the `opendistro.alerting.alert_rollover_period` setting. + * + * These indexes are created when first used and are then rolled over every `alert_rollover_period`. The rollover is + * initiated on the master node to ensure only a single node tries to roll it over. Once we have a curator functionality + * in Scheduled Jobs we can migrate to using that to rollover the index. + */ +class AlertIndices( + settings: Settings, + private val client: IndicesAdminClient, + private val threadPool: ThreadPool, + private val clusterService: ClusterService +) : LocalNodeMasterListener, ClusterStateListener { + + init { + clusterService.addListener(this) + clusterService.addLocalNodeMasterListener(this) + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { historyMaxDocs = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { historyMaxAge = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ROLLOVER_PERIOD) { + historyRolloverPeriod = it + rescheduleRollover() + } + clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } + } + + companion object { + + /** The in progress alert index. 
*/ + const val ALERT_INDEX = ".opendistro-alerting-alerts" + + /** The Elastic mapping type */ + const val MAPPING_TYPE = "_doc" + + /** The alias of the index in which to write alert history */ + const val HISTORY_WRITE_INDEX = ".opendistro-alerting-alert-history-write" + + /** The index name pattern to query all the alert history indices */ + const val HISTORY_INDEX_PATTERN = "<.opendistro-alerting-alert-history-{now/d}-1>" + + /** The index name pattern to query all alerts, history and current alerts. */ + const val ALL_INDEX_PATTERN = ".opendistro-alerting-alert*" + + @JvmStatic + fun alertMapping() = + AlertIndices::class.java.getResource("alert_mapping.json").readText() + } + + private val logger = ElasticAPI.INSTANCE.getLogger(AlertIndices::class.java, settings) + + @Volatile private var historyMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) + + @Volatile private var historyMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) + + @Volatile private var historyRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) + + @Volatile private var requestTimeout = AlertingSettings.REQUEST_TIMEOUT.get(settings) + + // for JobsMonitor to report + var lastRolloverTime: TimeValue? = null + + private var historyIndexInitialized: Boolean = false + + private var alertIndexInitialized: Boolean = false + + private var scheduledRollover: Cancellable? = null + + override fun onMaster() { + try { + // try to rollover immediately as we might be restarting the cluster + rolloverHistoryIndex() + // schedule the next rollover for approx MAX_AGE later + scheduledRollover = threadPool.scheduleWithFixedDelay({ rolloverHistoryIndex() }, + historyRolloverPeriod, executorName()) + } catch (e: Exception) { + // This should be run on cluster startup + logger.error("Error creating alert indices. 
" + + "Alerts can't be recorded until master node is restarted.", e) + } + } + + override fun offMaster() { + scheduledRollover?.cancel() + } + + override fun executorName(): String { + return ThreadPool.Names.MANAGEMENT + } + + override fun clusterChanged(event: ClusterChangedEvent) { + // if the indexes have been deleted they need to be reinitalized + alertIndexInitialized = event.state().routingTable().hasIndex(ALERT_INDEX) + historyIndexInitialized = event.state().metaData().hasAlias(HISTORY_WRITE_INDEX) + } + + private fun rescheduleRollover() { + if (clusterService.state().nodes.isLocalNodeElectedMaster) { + scheduledRollover?.cancel() + scheduledRollover = threadPool.scheduleWithFixedDelay({ rolloverHistoryIndex() }, historyRolloverPeriod, executorName()) + } + } + + fun isInitialized(): Boolean { + return alertIndexInitialized && historyIndexInitialized + } + + fun createAlertIndex() { + if (!alertIndexInitialized) { + alertIndexInitialized = createIndex(ALERT_INDEX) + } + alertIndexInitialized + } + + fun createInitialHistoryIndex() { + if (!historyIndexInitialized) { + historyIndexInitialized = createIndex(HISTORY_INDEX_PATTERN, HISTORY_WRITE_INDEX) + } + historyIndexInitialized + } + + private fun createIndex(index: String, alias: String? = null): Boolean { + // This should be a fast check of local cluster state. Should be exceedingly rare that the local cluster + // state does not contain the index and multiple nodes concurrently try to create the index. 
+ // If it does happen that error is handled we catch the ResourceAlreadyExistsException + val exists = client.exists(IndicesExistsRequest(index).local(true)).actionGet(requestTimeout).isExists + if (exists) return true + + val request = CreateIndexRequest(index).mapping(MAPPING_TYPE, alertMapping(), XContentType.JSON) + if (alias != null) request.alias(Alias(alias)) + return try { + client.create(request).actionGet(requestTimeout).isAcknowledged + } catch (e: ResourceAlreadyExistsException) { + true + } + } + + fun rolloverHistoryIndex(): Boolean { + if (!historyIndexInitialized) { + return false + } + + // We have to pass null for newIndexName in order to get Elastic to increment the index count. + val request = RolloverRequest(HISTORY_WRITE_INDEX, null) + ElasticAPI.INSTANCE.getCreateIndexRequest(request).index(HISTORY_INDEX_PATTERN) + .mapping(MAPPING_TYPE, alertMapping(), XContentType.JSON) + request.addMaxIndexDocsCondition(historyMaxDocs) + request.addMaxIndexAgeCondition(historyMaxAge) + val response = client.rolloversIndex(request).actionGet(requestTimeout) + if (!response.isRolledOver) { + logger.info("$HISTORY_WRITE_INDEX not rolled over. Conditions were: ${response.conditionStatus}") + } else { + lastRolloverTime = TimeValue.timeValueMillis(threadPool.absoluteTimeInMillis()) + } + return response.isRolledOver + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertMover.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertMover.kt new file mode 100644 index 00000000..eca55cd9 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertMover.kt @@ -0,0 +1,166 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.alerts + +import com.amazon.opendistroforelasticsearch.alerting.MonitorRunner +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices.Companion.HISTORY_WRITE_INDEX +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import org.apache.logging.log4j.Logger +import org.elasticsearch.action.ActionListener +import org.elasticsearch.action.bulk.BulkRequest +import org.elasticsearch.action.bulk.BulkResponse +import org.elasticsearch.action.delete.DeleteRequest +import org.elasticsearch.action.index.IndexRequest +import org.elasticsearch.action.search.SearchRequest +import org.elasticsearch.action.search.SearchResponse +import org.elasticsearch.client.Client +import org.elasticsearch.common.bytes.BytesReference +import org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import org.elasticsearch.index.VersionType +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.threadpool.ThreadPool + +/** + * Class to 
manage the moving of active alerts when a monitor or trigger is deleted. + * + * The logic for moving alerts consists of: + * 1. Find active alerts: + * a. matching monitorId if no monitor is provided (postDelete) + * b. matching monitorId and no triggerIds if monitor is provided (postIndex) + * 2. Move alerts over to [HISTORY_WRITE_INDEX] as DELETED + * 3. Delete alerts from [ALERT_INDEX] + * 4. Schedule a retry if there were any failures + */ +class AlertMover( + private val client: Client, + private val threadPool: ThreadPool, + private val monitorRunner: MonitorRunner, + private val alertIndices: AlertIndices, + private val backoff: Iterator, + private val logger: Logger, + private val monitorId: String, + private val monitor: Monitor? = null +) { + + private var hasFailures: Boolean = false + + fun run() { + if (alertIndices.isInitialized()) { + findActiveAlerts() + } + } + + private fun findActiveAlerts() { + val boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + + if (monitor != null) { + boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, monitor.triggers.map { it.id })) + } + + val activeAlertsQuery = SearchSourceBuilder.searchSource() + .query(boolQuery) + .version(true) + + val activeAlertsRequest = SearchRequest(AlertIndices.ALERT_INDEX) + .routing(monitorId) + .source(activeAlertsQuery) + client.search(activeAlertsRequest, ActionListener.wrap(::onSearchResponse, ::onFailure)) + } + + private fun onSearchResponse(response: SearchResponse) { + // If no alerts are found, simply return + if (response.hits.totalHits == 0L) return + val indexRequests = response.hits.map { hit -> + IndexRequest(AlertIndices.HISTORY_WRITE_INDEX, AlertIndices.MAPPING_TYPE) + .routing(monitorId) + .source(Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) + .copy(state = Alert.State.DELETED) + .toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) + .version(hit.version) + 
.versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + } + val copyRequest = BulkRequest().add(indexRequests) + client.bulk(copyRequest, ActionListener.wrap(::onCopyResponse, ::onFailure)) + } + + private fun onCopyResponse(response: BulkResponse) { + val deleteRequests = response.items.filterNot { it.isFailed }.map { + DeleteRequest(AlertIndices.ALERT_INDEX, AlertIndices.MAPPING_TYPE, it.id) + .routing(monitorId) + .version(it.version) + } + if (response.hasFailures()) { + hasFailures = true + for (it in response.items) { + logger.error("Failed to move deleted alert to alert history index: ${it.id}", + it.failure.cause) + } + } + + val bulkRequest = BulkRequest().add(deleteRequests) + client.bulk(bulkRequest, ActionListener.wrap(::onDeleteResponse, ::onFailure)) + } + + private fun onDeleteResponse(response: BulkResponse) { + if (response.hasFailures()) { + hasFailures = true + for (it in response.items) { + logger.error("Failed to delete active alert from alert index: ${it.id}", + it.failure.cause) + } + } + if (hasFailures) reschedule() + } + + private fun onFailure(e: Exception) { + logger.error("Failed to move alerts for ${monitorIdTriggerIdsTuple()}", e) + reschedule() + } + + private fun reschedule() { + if (backoff.hasNext()) { + logger.warn("Rescheduling AlertMover due to failure for ${monitorIdTriggerIdsTuple()}") + val wait = backoff.next() + val runnable = Runnable { + monitorRunner.rescheduleAlertMover(monitorId, monitor, backoff) + } + threadPool.schedule(wait, ThreadPool.Names.SAME, runnable) + } else { + logger.warn("Retries exhausted for ${monitorIdTriggerIdsTuple()}") + } + } + + private fun alertContentParser(bytesReference: BytesReference): XContentParser { + val xcp = ElasticAPI.INSTANCE.jsonParser(NamedXContentRegistry.EMPTY, bytesReference) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + return xcp + } + + private fun monitorIdTriggerIdsTuple(): String { + return 
"[$monitorId, ${monitor?.triggers?.map { it.id }}]" + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Alert.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Alert.kt new file mode 100644 index 00000000..67dc5efc --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Alert.kt @@ -0,0 +1,177 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertError +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.instant +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import org.elasticsearch.common.lucene.uid.Versions +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.Instant + +data class Alert( + val id: String = NO_ID, + val version: Long = NO_VERSION, + val monitorId: String, + val monitorName: String, + val monitorVersion: Long, + val triggerId: String, + val triggerName: String, + val state: State, + val startTime: Instant, + val endTime: Instant? 
= null, + val lastNotificationTime: Instant? = null, + val acknowledgedTime: Instant? = null, + val errorMessage: String? = null, + val errorHistory: List, + val severity: String +) : ToXContent { + + init { + if (errorMessage != null) require(state == State.DELETED || state == State.ERROR) { + "Attempt to create an alert with an error in state: $state" + } + } + + constructor( + monitor: Monitor, + trigger: Trigger, + startTime: Instant, + lastNotificationTime: Instant?, + state: State = State.ACTIVE, + errorMessage: String? = null, + errorHistory: List = mutableListOf() + ) : this(monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, + triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, + lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, + severity = trigger.severity) + + enum class State { + ACTIVE, ACKNOWLEDGED, COMPLETED, ERROR, DELETED + } + + fun isAcknowledged(): Boolean = (state == State.ACKNOWLEDGED) + + companion object { + + const val MONITOR_ID_FIELD = "monitor_id" + const val MONITOR_VERSION_FIELD = "monitor_version" + const val MONITOR_NAME_FIELD = "monitor_name" + const val TRIGGER_ID_FIELD = "trigger_id" + const val TRIGGER_NAME_FIELD = "trigger_name" + const val STATE_FIELD = "state" + const val START_TIME_FIELD = "start_time" + const val LAST_NOTIFICATION_TIME_FIELD = "last_notification_time" + const val END_TIME_FIELD = "end_time" + const val ACKNOWLEDGED_TIME_FIELD = "acknowledged_time" + const val ERROR_MESSAGE_FIELD = "error_message" + const val ALERT_HISTORY_FIELD = "alert_history" + const val SEVERITY_FIELD = "severity" + + const val NO_ID = "" + const val NO_VERSION = Versions.NOT_FOUND + + /** + * The mapping type of [Alert]s in the ES index. + * + * This should go away starting ES 7. 
We use "_doc" for future compatibility as described here: + * https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_schedule_for_removal_of_mapping_types + */ + const val ALERT_TYPE = "_doc" + + @JvmStatic @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Alert { + + lateinit var monitorId: String + lateinit var monitorName: String + var monitorVersion: Long = Versions.NOT_FOUND + lateinit var triggerId: String + lateinit var triggerName: String + lateinit var state: State + lateinit var startTime: Instant + lateinit var severity: String + var endTime: Instant? = null + var lastNotificationTime: Instant? = null + var acknowledgedTime: Instant? = null + var errorMessage: String? = null + val errorHistory: MutableList = mutableListOf() + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + MONITOR_ID_FIELD -> monitorId = xcp.text() + MONITOR_NAME_FIELD -> monitorName = xcp.text() + MONITOR_VERSION_FIELD -> monitorVersion = xcp.longValue() + TRIGGER_ID_FIELD -> triggerId = xcp.text() + STATE_FIELD -> state = State.valueOf(xcp.text()) + TRIGGER_NAME_FIELD -> triggerName = xcp.text() + START_TIME_FIELD -> startTime = requireNotNull(xcp.instant()) + END_TIME_FIELD -> endTime = xcp.instant() + LAST_NOTIFICATION_TIME_FIELD -> lastNotificationTime = xcp.instant() + ACKNOWLEDGED_TIME_FIELD -> acknowledgedTime = xcp.instant() + ERROR_MESSAGE_FIELD -> errorMessage = xcp.textOrNull() + ALERT_HISTORY_FIELD -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + errorHistory.add(AlertError.parse(xcp)) + } + } + SEVERITY_FIELD -> severity = xcp.text() + } + } + + return Alert(id = 
id, version = version, monitorId = requireNotNull(monitorId), + monitorName = requireNotNull(monitorName), monitorVersion = monitorVersion, + triggerId = requireNotNull(triggerId), triggerName = requireNotNull(triggerName), + state = requireNotNull(state), startTime = requireNotNull(startTime), endTime = endTime, + lastNotificationTime = lastNotificationTime, acknowledgedTime = acknowledgedTime, + errorMessage = errorMessage, errorHistory = errorHistory, severity = severity) + } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field(MONITOR_ID_FIELD, monitorId) + .field(MONITOR_VERSION_FIELD, monitorVersion) + .field(MONITOR_NAME_FIELD, monitorName) + .field(TRIGGER_ID_FIELD, triggerId) + .field(TRIGGER_NAME_FIELD, triggerName) + .field(STATE_FIELD, state) + .field(ERROR_MESSAGE_FIELD, errorMessage) + .field(ALERT_HISTORY_FIELD, errorHistory.toTypedArray()) + .field(SEVERITY_FIELD, severity) + .optionalTimeField(START_TIME_FIELD, startTime) + .optionalTimeField(LAST_NOTIFICATION_TIME_FIELD, lastNotificationTime) + .optionalTimeField(END_TIME_FIELD, endTime) + .optionalTimeField(ACKNOWLEDGED_TIME_FIELD, acknowledgedTime) + .endObject() + } + + fun asTemplateArg(): Map { + return mapOf(STATE_FIELD to state.toString(), + ERROR_MESSAGE_FIELD to errorMessage, + ACKNOWLEDGED_TIME_FIELD to acknowledgedTime?.toEpochMilli(), + LAST_NOTIFICATION_TIME_FIELD to lastNotificationTime?.toEpochMilli()) + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Monitor.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Monitor.kt new file mode 100644 index 00000000..872838b7 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Monitor.kt @@ -0,0 +1,179 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.MONITOR_MAX_INPUTS +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings.Companion.MONITOR_MAX_TRIGGERS +import com.amazon.opendistroforelasticsearch.alerting.core.model.Input +import com.amazon.opendistroforelasticsearch.alerting.core.model.Schedule +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.util._ID +import com.amazon.opendistroforelasticsearch.alerting.util._VERSION +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.instant +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import org.elasticsearch.common.CheckedFunction +import org.elasticsearch.common.ParseField +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParser.Token +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.Instant + +/** + * A value object that represents a Monitor. Monitors are used to periodically execute a source query and check the + * results. 
+ */ +data class Monitor( + override val id: String = NO_ID, + override val version: Long = NO_VERSION, + override val name: String, + override val enabled: Boolean, + override val schedule: Schedule, + override val lastUpdateTime: Instant, + override val enabledTime: Instant?, + val inputs: List, + val triggers: List, + val uiMetadata: Map +) : ScheduledJob { + + override val type = MONITOR_TYPE + + init { + // Ensure that trigger ids are unique within a monitor + val triggerIds = mutableSetOf() + triggers.forEach { trigger -> + require(triggerIds.add(trigger.id)) { "Duplicate trigger id: ${trigger.id}. Trigger ids must be unique." } + } + if (enabled) { + requireNotNull(enabledTime) + } else { + require(enabledTime == null) + } + require(inputs.size <= MONITOR_MAX_INPUTS) { "Monitors can only have $MONITOR_MAX_INPUTS search input." } + require(triggers.size <= MONITOR_MAX_TRIGGERS) { "Monitors can only support up to $MONITOR_MAX_TRIGGERS triggers." } + } + + fun toXContent(builder: XContentBuilder): XContentBuilder { + return toXContent(builder, ToXContent.EMPTY_PARAMS) + } + + /** Returns a representation of the monitor suitable for passing into painless and mustache scripts. 
*/ + fun asTemplateArg(): Map { + return mapOf(_ID to id, _VERSION to version, NAME_FIELD to name, ENABLED_FIELD to enabled) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(type) + builder.field(TYPE_FIELD, type) + .field(NAME_FIELD, name) + .field(ENABLED_FIELD, enabled) + .optionalTimeField(ENABLED_TIME_FIELD, enabledTime) + .field(SCHEDULE_FIELD, schedule) + .field(INPUTS_FIELD, inputs.toTypedArray()) + .field(TRIGGERS_FIELD, triggers.toTypedArray()) + .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) + if (uiMetadata.isNotEmpty()) builder.field(UI_METADATA_FIELD, uiMetadata) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + override fun fromDocument(id: String, version: Long): Monitor = copy(id = id, version = version) + + companion object { + const val MONITOR_TYPE = "monitor" + const val TYPE_FIELD = "type" + const val NAME_FIELD = "name" + const val ENABLED_FIELD = "enabled" + const val SCHEDULE_FIELD = "schedule" + const val TRIGGERS_FIELD = "triggers" + const val NO_ID = "" + const val NO_VERSION = 1L + const val INPUTS_FIELD = "inputs" + const val LAST_UPDATE_TIME_FIELD = "last_update_time" + const val UI_METADATA_FIELD = "ui_metadata" + const val ENABLED_TIME_FIELD = "enabled_time" + + // This is defined here instead of in ScheduledJob to avoid having the ScheduledJob class know about all + // the different subclasses and creating circular dependencies + val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(ScheduledJob::class.java, + ParseField(MONITOR_TYPE), + CheckedFunction { parse(it) }) + + @JvmStatic + @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Monitor { + lateinit var name: String + lateinit var schedule: Schedule + var lastUpdateTime: Instant? 
= null + var enabledTime: Instant? = null + var uiMetadata: Map = mapOf() + var enabled = true + val triggers: MutableList = mutableListOf() + val inputs: MutableList = mutableListOf() + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + NAME_FIELD -> name = xcp.text() + ENABLED_FIELD -> enabled = xcp.booleanValue() + SCHEDULE_FIELD -> schedule = Schedule.parse(xcp) + INPUTS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != Token.END_ARRAY) { + inputs.add(Input.parse(xcp)) + } + } + TRIGGERS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != Token.END_ARRAY) { + triggers.add(Trigger.parse(xcp)) + } + } + ENABLED_TIME_FIELD -> enabledTime = xcp.instant() + LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() + UI_METADATA_FIELD -> uiMetadata = xcp.map() + else -> { + xcp.skipChildren() + } + } + } + + if (enabled && enabledTime == null) { + enabledTime = Instant.now() + } else if (!enabled) { + enabledTime = null + } + return Monitor(id, + version, + requireNotNull(name) { "Monitor name is null" }, + enabled, + requireNotNull(schedule) { "Monitor schedule is null" }, + lastUpdateTime ?: Instant.now(), + enabledTime, + inputs.toList(), + triggers.toList(), + uiMetadata) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/MonitorRunResult.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/MonitorRunResult.kt new file mode 100644 index 00000000..e886071d --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/MonitorRunResult.kt @@ -0,0 +1,135 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertError +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import org.elasticsearch.ElasticsearchException +import org.elasticsearch.common.logging.Loggers +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.script.ScriptException +import java.time.Instant + +data class MonitorRunResult( + val monitorName: String, + val periodStart: Instant, + val periodEnd: Instant, + val error: Exception? = null, + val inputResults: InputRunResults = InputRunResults(), + val triggerResults: Map = mapOf() +) : ToXContent { + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("monitor_name", monitorName) + .optionalTimeField("period_start", periodStart) + .optionalTimeField("period_end", periodEnd) + .field("error", error?.message) + .field("input_results", inputResults) + .field("trigger_results", triggerResults) + .endObject() + } + + /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */ + fun alertError(): AlertError? 
{ + if (error != null) { + return AlertError(Instant.now(), "Error running monitor:\n${error.userErrorMessage()}") + } + + if (inputResults.error != null) { + return AlertError(Instant.now(), "Error fetching inputs:\n${inputResults.error.userErrorMessage()}") + } + return null + } + + fun scriptContextError(trigger: Trigger): Exception? { + return error ?: inputResults.error ?: triggerResults[trigger.id]?.error + } +} + +data class InputRunResults(val results: List> = listOf(), val error: Exception? = null) : ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("results", results) + .field("error", error?.message) + .endObject() + } +} + +data class TriggerRunResult( + val triggerName: String, + val triggered: Boolean, + val error: Exception? = null, + val actionResults: MutableMap = mutableMapOf() +) : ToXContent { + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("name", triggerName) + .field("triggered", triggered) + .field("error", error?.message) + .field("action_results", actionResults as Map) + .endObject() + } + + /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */ + fun alertError(): AlertError? { + if (error != null) { + return AlertError(Instant.now(), "Error evaluating trigger:\n${error.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Error running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } +} + +data class ActionRunResult( + val actionName: String, + val output: Map, + val throttled: Boolean = false, + val error: Exception? 
= null +) : ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("name", actionName) + .field("output", output) + .field("throttled", throttled) + .field("error", error?.message) + .endObject() + } +} + +private val logger = Loggers.getLogger("UserError") + +/** Constructs an error message from an exception suitable for human consumption. */ +private fun Throwable.userErrorMessage(): String { + return when { + this is ScriptException -> this.scriptStack.joinToString(separator = "\n", limit = 100) + this is ElasticsearchException -> this.detailedMessage + this.message != null -> { + logger.info("Internal error: ${this.message}. See the Elasticsearch.log for details", this) + this.message!! + } + else -> { + logger.info("Unknown Internal error. See the Elasticsearch log for details.", this) + "Unknown Internal error. See the Elasticsearch log for details." + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Trigger.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Trigger.kt new file mode 100644 index 00000000..5842171f --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/Trigger.kt @@ -0,0 +1,105 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action +import org.elasticsearch.common.UUIDs +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParser.Token +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.script.Script +import java.io.IOException + +data class Trigger( + val name: String, + val severity: String, + val condition: Script, + val actions: List, + val id: String = UUIDs.base64UUID() +) : ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(ID_FIELD, id) + .field(NAME_FIELD, name) + .field(SEVERITY_FIELD, severity) + .startObject(CONDITION_FIELD) + .field(SCRIPT_FIELD, condition) + .endObject() + .field(ACTIONS_FIELD, actions.toTypedArray()) + .endObject() + return builder + } + + /** Returns a representation of the trigger suitable for passing into painless and mustache scripts. 
*/ + fun asTemplateArg(): Map { + return mapOf(ID_FIELD to id, NAME_FIELD to name, SEVERITY_FIELD to severity, + ACTIONS_FIELD to actions.map { it.asTemplateArg() }) + } + + companion object { + const val ID_FIELD = "id" + const val NAME_FIELD = "name" + const val SEVERITY_FIELD = "severity" + const val CONDITION_FIELD = "condition" + const val ACTIONS_FIELD = "actions" + const val SCRIPT_FIELD = "script" + + @JvmStatic @Throws(IOException::class) + fun parse(xcp: XContentParser): Trigger { + var id = UUIDs.base64UUID() // assign a default triggerId if one is not specified + lateinit var name: String + lateinit var severity: String + lateinit var condition: Script + val actions: MutableList = mutableListOf() + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + + xcp.nextToken() + when (fieldName) { + ID_FIELD -> id = xcp.text() + NAME_FIELD -> name = xcp.text() + SEVERITY_FIELD -> severity = xcp.text() + CONDITION_FIELD -> { + xcp.nextToken() + condition = Script.parse(xcp) + require(condition.lang == Script.DEFAULT_SCRIPT_LANG) { + "Invalid script language. Allowed languages are [${Script.DEFAULT_SCRIPT_LANG}]" + } + xcp.nextToken() + } + ACTIONS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != Token.END_ARRAY) { + actions.add(Action.parse(xcp)) + } + } + } + } + + return Trigger( + name = requireNotNull(name) { "Trigger name is null" }, + severity = requireNotNull(severity) { "Trigger severity is null" }, + condition = requireNotNull(condition) { "Trigger is null" }, + actions = requireNotNull(actions) { "Trigger actions are null" }, + id = requireNotNull(id) { "Trigger id is null." 
}) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/action/Action.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/action/Action.kt new file mode 100644 index 00000000..a8064f23 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/action/Action.kt @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model.action + +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentObject +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import org.elasticsearch.script.Script +import java.io.IOException +import java.lang.IllegalStateException + +/** + * This class holds the data and parser logic for Action which is part of a trigger + */ +data class Action( + val name: String, + val destinationId: String, + val subjectTemplate: Script?, + val messageTemplate: Script +) : ToXContentObject { + + init { + if (subjectTemplate != null) { + require(subjectTemplate.lang == Action.MUSTACHE) { "subject_template must be a mustache script" } + } + require(messageTemplate.lang == Action.MUSTACHE) { "message_template must be a mustache script" } + } + + override fun 
toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field(NAME_FIELD, name) + .field(DESTINATION_ID_FIELD, destinationId) + .field(SUBJECT_TEMPLATE_FIELD, subjectTemplate) + .field(MESSAGE_TEMPLATE_FIELD, messageTemplate) + .endObject() + } + + fun asTemplateArg(): Map { + return mapOf(NAME_FIELD to name) + } + + companion object { + const val NAME_FIELD = "name" + const val DESTINATION_ID_FIELD = "destination_id" + const val SUBJECT_TEMPLATE_FIELD = "subject_template" + const val MESSAGE_TEMPLATE_FIELD = "message_template" + const val MUSTACHE = "mustache" + const val SUBJECT = "subject" + const val MESSAGE = "message" + const val MESSAGE_ID = "messageId" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Action { + lateinit var name: String + lateinit var destinationId: String + var subjectTemplate: Script? = null // subject template could be null for some destinations + lateinit var messageTemplate: Script + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + NAME_FIELD -> name = xcp.textOrNull() + DESTINATION_ID_FIELD -> destinationId = xcp.textOrNull() + SUBJECT_TEMPLATE_FIELD -> subjectTemplate = Script.parse(xcp, Script.DEFAULT_TEMPLATE_LANG) + MESSAGE_TEMPLATE_FIELD -> messageTemplate = Script.parse(xcp, Script.DEFAULT_TEMPLATE_LANG) + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing action") + } + } + } + return Action(requireNotNull(name) { "Destination name is null" }, + requireNotNull(destinationId) { "Destination id is null" }, + subjectTemplate, + requireNotNull(messageTemplate) { "Destination message template is null" }) + } + } +} diff --git 
a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Chime.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Chime.kt new file mode 100644 index 00000000..28e12f5c --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Chime.kt @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model.destination + +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import org.elasticsearch.common.Strings +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.common.xcontent.XContentType +import java.io.IOException +import java.lang.IllegalStateException + +/** + * A value object that represents a Chime message. 
Chime message will be + * submitted to the Chime destination + */ +data class Chime(val url: String) : ToXContent { + + init { + require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .endObject() + } + + companion object { + const val URL = "url" + const val TYPE = "chime" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Chime { + lateinit var url: String + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.text() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing Chime destination") + } + } + } + return Chime(url) + } + } + + fun constructMessageContent(subject: String?, message: String?): String { + val messageContent: String? = if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" + val builder = XContentFactory.contentBuilder(XContentType.JSON) + builder.startObject() + .field("Content", messageContent) + .endObject() + return builder.string() + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/CustomWebhook.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/CustomWebhook.kt new file mode 100644 index 00000000..4deda766 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/CustomWebhook.kt @@ -0,0 +1,109 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model.destination + +import org.elasticsearch.common.Strings +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.lang.IllegalStateException + +/** + * A value object that represents a Custom webhook message. Webhook message will be + * submitted to the Custom webhook destination + */ +data class CustomWebhook( + val url: String?, + val scheme: String?, + val host: String?, + val port: Int, + val path: String?, + val queryParams: Map, + val headerParams: Map, + val username: String?, + val password: String? +) : ToXContent { + + init { + require(!(Strings.isNullOrEmpty(url) && Strings.isNullOrEmpty(host))) { + "Url or Host name must be provided." 
+ } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .field(SCHEME_FIELD, scheme) + .field(HOST_FIELD, host) + .field(PORT_FIELD, port) + .field(PATH_FIELD, path) + .field(QUERY_PARAMS_FIELD, queryParams) + .field(HEADER_PARAMS_FIELD, headerParams) + .field(USERNAME_FIELD, username) + .field(PASSWORD_FIELD, password) + .endObject() + } + + companion object { + const val URL = "url" + const val TYPE = "custom_webhook" + const val SCHEME_FIELD = "scheme" + const val HOST_FIELD = "host" + const val PORT_FIELD = "port" + const val PATH_FIELD = "path" + const val QUERY_PARAMS_FIELD = "query_params" + const val HEADER_PARAMS_FIELD = "header_params" + const val USERNAME_FIELD = "username" + const val PASSWORD_FIELD = "password" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): CustomWebhook { + var url: String? = null + var scheme: String? = null + var host: String? = null + var port: Int = -1 + var path: String? = null + var queryParams: Map = mutableMapOf() + var headerParams: Map = mutableMapOf() + var username: String? = null + var password: String? 
= null + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.textOrNull() + SCHEME_FIELD -> scheme = xcp.textOrNull() + HOST_FIELD -> host = xcp.textOrNull() + PORT_FIELD -> port = xcp.intValue() + PATH_FIELD -> path = xcp.textOrNull() + QUERY_PARAMS_FIELD -> queryParams = xcp.mapStrings() + HEADER_PARAMS_FIELD -> headerParams = xcp.mapStrings() + USERNAME_FIELD -> username = xcp.textOrNull() + PASSWORD_FIELD -> password = xcp.textOrNull() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing custom webhook destination") + } + } + } + return CustomWebhook(url, scheme, host, port, path, queryParams, headerParams, username, password) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Destination.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Destination.kt new file mode 100644 index 00000000..66e5af71 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Destination.kt @@ -0,0 +1,186 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.model.destination + +import com.amazon.opendistroforelasticsearch.alerting.destination.Notification +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient +import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage +import com.amazon.opendistroforelasticsearch.alerting.destination.message.ChimeMessage +import com.amazon.opendistroforelasticsearch.alerting.destination.message.CustomWebhookMessage +import com.amazon.opendistroforelasticsearch.alerting.destination.message.SlackMessage +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.convertToMap +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.instant +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import com.amazon.opendistroforelasticsearch.alerting.util.DestinationType +import org.elasticsearch.common.logging.Loggers +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import java.io.IOException +import java.time.Instant +import java.util.Locale + +/** + * A value object that represents a Destination message. + */ +data class Destination( + val id: String = NO_ID, + val version: Long = NO_VERSION, + val type: DestinationType, + val name: String, + val lastUpdateTime: Instant, + val chime: Chime?, + val slack: Slack?, + val customWebhook: CustomWebhook? 
+) : ToXContent { + + private val logger = Loggers.getLogger(DestinationHttpClient::class.java) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(DESTINATION) + builder.field(TYPE_FIELD, type.value) + .field(NAME_FIELD, name) + .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) + .field(type.value, constructResponseForDestinationType(type)) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + fun toXContent(builder: XContentBuilder): XContentBuilder { + return toXContent(builder, ToXContent.EMPTY_PARAMS) + } + + companion object { + const val DESTINATION = "destination" + const val TYPE_FIELD = "type" + const val NAME_FIELD = "name" + const val NO_ID = "" + const val NO_VERSION = 1L + const val LAST_UPDATE_TIME_FIELD = "last_update_time" + const val CHIME = "chime" + const val SLACK = "slack" + const val CUSTOMWEBHOOK = "custom_webhook" + // This constant is used for test actions created part of integ tests + const val TEST_ACTION = "test" + + @JvmStatic + @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Destination { + lateinit var name: String + lateinit var type: String + var slack: Slack? = null + var chime: Chime? = null + var customWebhook: CustomWebhook? = null + var lastUpdateTime: Instant? 
= null + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + NAME_FIELD -> name = xcp.text() + TYPE_FIELD -> { + type = xcp.text() + val allowedTypes = DestinationType.values().map { it.value } + if (!allowedTypes.contains(type)) { + throw IllegalStateException("Type should be one of the $allowedTypes") + } + } + LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() + CHIME -> { + chime = Chime.parse(xcp) + } + SLACK -> { + slack = Slack.parse(xcp) + } + CUSTOMWEBHOOK -> { + customWebhook = CustomWebhook.parse(xcp) + } + TEST_ACTION -> { + // This condition is for integ tests to avoid parsing + } + else -> { + xcp.skipChildren() + } + } + } + return Destination(id, + version, + DestinationType.valueOf(type.toUpperCase(Locale.ROOT)), + requireNotNull(name) { "Destination name is null" }, + lastUpdateTime ?: Instant.now(), + chime, + slack, + customWebhook) + } + } + + fun publish(compiledSubject: String?, compiledMessage: String): String { + val destinationMessage: BaseMessage + when (type) { + DestinationType.CHIME -> { + val messageContent = chime?.constructMessageContent(compiledSubject, compiledMessage) + destinationMessage = ChimeMessage.Builder(name) + .withUrl(chime?.url) + .withMessage(messageContent) + .build() + } + DestinationType.SLACK -> { + val messageContent = slack?.constructMessageContent(compiledSubject, compiledMessage) + destinationMessage = SlackMessage.Builder(name) + .withUrl(slack?.url) + .withMessage(messageContent) + .build() + } + DestinationType.CUSTOM_WEBHOOK -> { + destinationMessage = CustomWebhookMessage.Builder(name) + .withUrl(customWebhook?.url) + .withScheme(customWebhook?.scheme) + .withHost(customWebhook?.host) + .withPort(customWebhook?.port) + .withPath(customWebhook?.path) + 
.withQueryParams(customWebhook?.queryParams) + .withHeaderParams(customWebhook?.headerParams) + .withMessage(compiledMessage).build() + } + DestinationType.TEST_ACTION -> { + return "test action" + } + } + val response = Notification.publish(destinationMessage) as DestinationHttpResponse + logger.info("Message published for action name: $name, messageid: ${response.responseContent}, statuscode: ${response.statusCode}") + return response.responseContent + } + + fun constructResponseForDestinationType(type: DestinationType): Any { + var content: Any? = null + when (type) { + DestinationType.CHIME -> content = chime?.convertToMap()?.get(type.value) + DestinationType.SLACK -> content = slack?.convertToMap()?.get(type.value) + DestinationType.CUSTOM_WEBHOOK -> content = customWebhook?.convertToMap()?.get(type.value) + DestinationType.TEST_ACTION -> content = "dummy" + } + if (content == null) { + throw IllegalArgumentException("Content is NULL for destination type ${type.value}") + } + return content + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/SNS.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/SNS.kt new file mode 100644 index 00000000..31722f61 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/SNS.kt @@ -0,0 +1,71 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model.destination + +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import java.io.IOException +import java.lang.IllegalStateException +import java.util.regex.Pattern + +data class SNS(val topicARN: String, val roleARN: String) : ToXContent { + + init { + require(SNS_ARN_REGEX.matcher(topicARN).find()) { "Invalid AWS SNS topic ARN: $topicARN" } + require(IAM_ARN_REGEX.matcher(roleARN).find()) { "Invalid AWS role ARN: $roleARN " } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(SNS_TYPE) + .field(TOPIC_ARN_FIELD, topicARN) + .field(ROLE_ARN_FIELD, roleARN) + .endObject() + } + + companion object { + + private val SNS_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:sns:([a-zA-Z0-9-]+):([0-9]{12}):([a-zA-Z0-9-_]+)$") + private val IAM_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:iam::([0-9]{12}):([a-zA-Z0-9-/_]+)$") + + const val TOPIC_ARN_FIELD = "topic_arn" + const val ROLE_ARN_FIELD = "role_arn" + const val SNS_TYPE = "sns" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): SNS { + lateinit var topicARN: String + lateinit var roleARN: String + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + TOPIC_ARN_FIELD -> topicARN = xcp.textOrNull() + ROLE_ARN_FIELD -> roleARN = xcp.textOrNull() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing SNS destination") + } + } + } + 
return SNS(requireNotNull(topicARN) { "SNS Action topic_arn is null" }, + requireNotNull(roleARN) { "SNS Action role_arn is null" }) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Slack.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Slack.kt new file mode 100644 index 00000000..2580323b --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/destination/Slack.kt @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model.destination + +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import org.elasticsearch.common.Strings +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.common.xcontent.XContentType +import java.io.IOException +import java.lang.IllegalStateException + +/** + * A value object that represents a Slack message. 
Slack message will be + * submitted to the Slack destination + */ +data class Slack(val url: String) : ToXContent { + + init { + require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .endObject() + } + + companion object { + const val URL = "url" + const val TYPE = "slack" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Slack { + lateinit var url: String + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.text() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing Slack destination") + } + } + } + return Slack(url) + } + } + + fun constructMessageContent(subject: String?, message: String): String { + val messageContent: String? = if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" + val builder = XContentFactory.contentBuilder(XContentType.JSON) + builder.startObject() + .field("text", messageContent) + .endObject() + return builder.string() + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/AsyncActionHandler.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/AsyncActionHandler.kt new file mode 100644 index 00000000..be1b7c2c --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/AsyncActionHandler.kt @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.rest.BytesRestResponse +import org.elasticsearch.rest.RestChannel + +abstract class AsyncActionHandler(protected val client: NodeClient, protected val channel: RestChannel) { + + protected fun onFailure(e: Exception) { + channel.sendResponse(BytesRestResponse(channel, e)) + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestAcknowledgeAlertAction.kt new file mode 100644 index 00000000..02ad7e51 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestAcknowledgeAlertAction.kt @@ -0,0 +1,217 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACKNOWLEDGED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACTIVE +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.COMPLETED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ERROR +import com.amazon.opendistroforelasticsearch.alerting.util.REFRESH +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.optionalTimeField +import org.elasticsearch.action.ActionListener +import org.elasticsearch.action.bulk.BulkRequest +import org.elasticsearch.action.bulk.BulkResponse +import org.elasticsearch.action.search.SearchRequest +import org.elasticsearch.action.search.SearchResponse +import org.elasticsearch.action.support.WriteRequest +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy +import org.elasticsearch.action.update.UpdateRequest +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.BaseRestHandler.RestChannelConsumer +import org.elasticsearch.rest.BytesRestResponse +import org.elasticsearch.rest.RestChannel +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest +import 
org.elasticsearch.rest.RestRequest.Method.POST +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.search.builder.SearchSourceBuilder +import java.io.IOException +import java.time.Instant + +/** + * This class consists of the REST handler to acknowledge alerts. + * The user provides the monitorID to which these alerts pertain and in the content of the request provides + * the ids to the alerts he would like to acknowledge. + */ +class RestAcknowledgeAlertAction(settings: Settings, controller: RestController) : BaseRestHandler(settings) { + + init { + // Acknowledge alerts + controller.registerHandler(POST, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts", this) + } + + override fun getName(): String { + return "acknowledge_alert_action" + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val monitorId = request.param("monitorID") + require(!monitorId.isNullOrEmpty()) { "Missing monitor id." } + val alertIds = getAlertIds(request.contentParser()) + require(alertIds.isNotEmpty()) { "You must provide at least one alert id." } + val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) + + return RestChannelConsumer { channel -> + AcknowledgeHandler(client, channel, monitorId, alertIds, refreshPolicy).start() + } + } + + inner class AcknowledgeHandler( + client: NodeClient, + channel: RestChannel, + private val monitorId: String, + private val alertIds: List, + private val refreshPolicy: WriteRequest.RefreshPolicy? 
+ ) : AsyncActionHandler(client, channel) { + val alerts = mutableMapOf() + + fun start() = findActiveAlerts() + + private fun findActiveAlerts() { + val queryBuilder = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + .filter(QueryBuilders.termsQuery("_id", alertIds)) + val searchRequest = SearchRequest() + .indices(AlertIndices.ALERT_INDEX) + .types(Alert.ALERT_TYPE) + .routing(monitorId) + .source(SearchSourceBuilder().query(queryBuilder).version(true)) + + client.search(searchRequest, ActionListener.wrap(::onSearchResponse, ::onFailure)) + } + + private fun onSearchResponse(response: SearchResponse) { + val updateRequests = response.hits.flatMap { hit -> + val xcp = ElasticAPI.INSTANCE.jsonParser(channel.request().xContentRegistry, hit.sourceRef) + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + val alert = Alert.parse(xcp, hit.id, hit.version) + alerts[alert.id] = alert + if (alert.state == ACTIVE) { + listOf(UpdateRequest(AlertIndices.ALERT_INDEX, AlertIndices.MAPPING_TYPE, hit.id) + .routing(monitorId) + .version(hit.version) + .doc(XContentFactory.jsonBuilder().startObject() + .field(Alert.STATE_FIELD, ACKNOWLEDGED.toString()) + .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now()) + .endObject())) + } else { + emptyList() + } + } + + logger.info("Acknowledging monitor: $monitorId, alerts: ${updateRequests.map { it.id() }}") + val request = BulkRequest().add(updateRequests).setRefreshPolicy(refreshPolicy) + client.bulk(request, ActionListener.wrap(::onBulkResponse, ::onFailure)) + } + + private fun onBulkResponse(response: BulkResponse) { + val missing = alertIds.toMutableSet() + val acknowledged = mutableListOf() + val failed = mutableListOf() + // First handle all alerts that aren't currently ACTIVE. These can't be acknowledged. 
+ alerts.values.forEach { + if (it.state != ACTIVE) { + missing.remove(it.id) + failed.add(it) + } + } + // Now handle all alerts we tried to acknowledge... + response.items.forEach { item -> + missing.remove(item.id) + if (item.isFailed) { + failed.add(alerts[item.id]!!) + } else { + acknowledged.add(alerts[item.id]!!) + } + } + + channel.sendResponse(BytesRestResponse(RestStatus.OK, + responseBuilder(channel.newBuilder(), acknowledged.toList(), failed.toList(), missing.toList()))) + } + } + + /** + * Parse the request content and return a list of the alert ids to acknowledge + */ + private fun getAlertIds(xcp: XContentParser): List { + val ids = mutableListOf() + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + "alerts" -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + ids.add(xcp.text()) + } + } + } + } + return ids + } + + /** + * Build the response containing the acknowledged alerts and the failed to acknowledge alerts. + */ + private fun responseBuilder( + builder: XContentBuilder, + acknowledgedAlerts: List, + failedAlerts: List, + missing: List + ): XContentBuilder { + builder.startObject().startArray("success") + acknowledgedAlerts.forEach { builder.value(it.id) } + builder.endArray().startArray("failed") + failedAlerts.forEach { buildFailedAlertAcknowledgeObject(builder, it) } + missing.forEach { buildMissingAlertAcknowledgeObject(builder, it) } + return builder.endArray().endObject() + } + + private fun buildFailedAlertAcknowledgeObject(builder: XContentBuilder, failedAlert: Alert) { + builder.startObject() + .startObject(failedAlert.id) + val reason = when (failedAlert.state) { + ERROR -> "Alert is in an error state and can not be acknowledged." 
+ COMPLETED -> "Alert has already completed and can not be acknowledged." + ACKNOWLEDGED -> "Alert has already been acknowledged." + else -> "Alert state unknown and can not be acknowledged" + } + builder.field("failed_reason", reason) + .endObject() + .endObject() + } + + private fun buildMissingAlertAcknowledgeObject(builder: XContentBuilder, alertID: String) { + builder.startObject() + .startObject(alertID) + .field("failed_reason", "Alert: $alertID does not exist (it may have already completed).") + .endObject() + .endObject() + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteDestinationAction.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteDestinationAction.kt new file mode 100644 index 00000000..7447469d --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteDestinationAction.kt @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin +import com.amazon.opendistroforelasticsearch.alerting.util.REFRESH +import org.elasticsearch.action.delete.DeleteRequest +import org.elasticsearch.action.support.WriteRequest +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.BaseRestHandler.RestChannelConsumer +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest +import org.elasticsearch.rest.action.RestStatusToXContentListener +import java.io.IOException + +/** + * This class consists of the REST handler to delete destination. + */ +class RestDeleteDestinationAction(settings: Settings, controller: RestController) : BaseRestHandler(settings) { + + init { + controller.registerHandler(RestRequest.Method.DELETE, "${AlertingPlugin.DESTINATION_BASE_URI}/{destinationID}", this) + } + + override fun getName(): String { + return "delete_destination_action" + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val destinationId = request.param("destinationID") + val refreshPolicy = WriteRequest.RefreshPolicy.parse(request.param(REFRESH, WriteRequest.RefreshPolicy.IMMEDIATE.value)) + + return RestChannelConsumer { channel -> + val deleteDestinationRequest = + DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, ScheduledJob.SCHEDULED_JOB_TYPE, destinationId) + .setRefreshPolicy(refreshPolicy) + client.delete(deleteDestinationRequest, RestStatusToXContentListener(channel)) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteMonitorAction.kt 
b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteMonitorAction.kt new file mode 100644 index 00000000..0250d50f --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestDeleteMonitorAction.kt @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.util.REFRESH +import com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin +import org.elasticsearch.action.delete.DeleteRequest +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.BaseRestHandler.RestChannelConsumer +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest +import org.elasticsearch.rest.RestRequest.Method.DELETE +import org.elasticsearch.rest.action.RestStatusToXContentListener +import java.io.IOException + +/** + * This class consists of the REST handler to delete monitors. 
+ * When a monitor is deleted, all alerts are moved to the [Alert.State.DELETED] state and moved to the alert history index. + * If this process fails the monitor is not deleted. + */ +class RestDeleteMonitorAction(settings: Settings, controller: RestController) : + BaseRestHandler(settings) { + + init { + controller.registerHandler(DELETE, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", this) // Delete a monitor + } + + override fun getName(): String { + return "delete_monitor_action" + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val monitorId = request.param("monitorID") + val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) + + return RestChannelConsumer { channel -> + val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, ScheduledJob.SCHEDULED_JOB_TYPE, monitorId) + .setRefreshPolicy(refreshPolicy) + client.delete(deleteRequest, RestStatusToXContentListener(channel)) + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestExecuteMonitorAction.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestExecuteMonitorAction.kt new file mode 100644 index 00000000..372b2680 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestExecuteMonitorAction.kt @@ -0,0 +1,110 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.MonitorRunner +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin +import org.elasticsearch.action.get.GetRequest +import org.elasticsearch.action.get.GetResponse +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.xcontent.XContentParser.Token.START_OBJECT +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.BaseRestHandler.RestChannelConsumer +import org.elasticsearch.rest.BytesRestResponse +import org.elasticsearch.rest.RestChannel +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest +import org.elasticsearch.rest.RestRequest.Method.POST +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.rest.action.RestActionListener +import java.time.Instant + +class RestExecuteMonitorAction( + val settings: Settings, + restController: RestController, + private val runner: MonitorRunner +) : BaseRestHandler(settings) { + + init { + restController.registerHandler(POST, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_execute", this) + restController.registerHandler(POST, "${AlertingPlugin.MONITOR_BASE_URI}/_execute", this) + } + + override fun getName(): String = "execute_monitor_action" + + override fun prepareRequest(request: RestRequest, client: NodeClient): 
RestChannelConsumer { + return RestChannelConsumer { channel -> + val dryrun = request.paramAsBoolean("dryrun", false) + val requestEnd = request.paramAsTime("period_end", TimeValue(Instant.now().toEpochMilli())) + + val executeMonitor = fun(monitor: Monitor) { + runner.executor().submit { + val (periodStart, periodEnd) = + monitor.schedule.getPeriodEndingAt(Instant.ofEpochMilli(requestEnd.millis)) + try { + val response = runner.runMonitor(monitor, periodStart, periodEnd, dryrun) + channel.sendResponse(BytesRestResponse(RestStatus.OK, channel.newBuilder().value(response))) + } catch (e: Exception) { + logger.error("Unexpected error running monitor", e) + channel.sendResponse(BytesRestResponse(channel, e)) + } + } + } + + if (request.hasParam("monitorID")) { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(request.param("monitorID")) + client.get(getRequest, processGetResponse(channel, executeMonitor)) + } else { + val xcp = request.contentParser() + ensureExpectedToken(START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + executeMonitor(Monitor.parse(xcp, Monitor.NO_ID, Monitor.NO_VERSION)) + } + } + } + + override fun responseParams(): Set { + return setOf("dryrun", "period_end", "monitorID") + } + + private fun processGetResponse(channel: RestChannel, block: (Monitor) -> Unit): RestActionListener { + return object : RestActionListener(channel) { + + override fun processResponse(response: GetResponse) { + if (!response.isExists) { + val ret = this.channel.newErrorBuilder().startObject() + .field("message", "Can't find monitor with id: ${response.id}") + .endObject() + this.channel.sendResponse(BytesRestResponse(RestStatus.NOT_FOUND, ret)) + } + + val xcp = ElasticAPI.INSTANCE.createParser(this.channel.request().xContentRegistry, + response.sourceAsBytesRef, this.channel.request().xContentType ?: XContentType.JSON) + val monitor = xcp.use { + ScheduledJob.parse(xcp, response.id, response.version) as Monitor + } + + block(monitor) + } + } + } 
+} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestGetMonitorAction.kt b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestGetMonitorAction.kt new file mode 100644 index 00000000..3ba75307 --- /dev/null +++ b/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestGetMonitorAction.kt @@ -0,0 +1,96 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOB_TYPE +import com.amazon.opendistroforelasticsearch.alerting.util._ID +import com.amazon.opendistroforelasticsearch.alerting.util._VERSION +import com.amazon.opendistroforelasticsearch.alerting.util.context +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin +import org.elasticsearch.action.get.GetRequest +import org.elasticsearch.action.get.GetResponse +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.BaseRestHandler.RestChannelConsumer +import org.elasticsearch.rest.BytesRestResponse +import org.elasticsearch.rest.RestChannel +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest +import org.elasticsearch.rest.RestRequest.Method.GET +import org.elasticsearch.rest.RestRequest.Method.HEAD +import org.elasticsearch.rest.RestResponse +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.rest.action.RestActions +import org.elasticsearch.rest.action.RestResponseListener +import org.elasticsearch.search.fetch.subphase.FetchSourceContext + +/** + * This class consists of the REST handler to retrieve a monitor . 
+ */ +class RestGetMonitorAction(settings: Settings, controller: RestController) : BaseRestHandler(settings) { + + init { + // Get a specific monitor + controller.registerHandler(GET, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", this) + controller.registerHandler(HEAD, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", this) + } + + override fun getName(): String { + return "get_monitor_action" + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val monitorId = request.param("monitorID") + if (monitorId == null || monitorId.isEmpty()) { + throw IllegalArgumentException("missing id") + } + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE, monitorId) + .version(RestActions.parseVersion(request)) + .fetchSourceContext(context(request)) + if (request.method() == HEAD) { + getRequest.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + } + return RestChannelConsumer { channel -> client.get(getRequest, getMonitorResponse(channel)) } + } + + private fun getMonitorResponse(channel: RestChannel): RestResponseListener { + return object : RestResponseListener(channel) { + @Throws(Exception::class) + override fun buildResponse(response: GetResponse): RestResponse { + if (!response.isExists) { + return BytesRestResponse(RestStatus.NOT_FOUND, channel.newBuilder()) + } + + val builder = channel.newBuilder() + .startObject() + .field(_ID, response.id) + .field(_VERSION, response.version) + if (!response.isSourceEmpty) { + ElasticAPI.INSTANCE + .jsonParser(channel.request().xContentRegistry, response.sourceAsBytesRef).use { xcp -> + val monitor = ScheduledJob.parse(xcp, response.id, response.version) + builder.field("monitor", monitor) + } + } + builder.endObject() + return BytesRestResponse(RestStatus.OK, builder) + } + } + } +} diff --git a/alerting/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/RestIndexDestinationAction.kt 
/**
 * Rest handlers to create and update Destination documents in the scheduled jobs index.
 */
class RestIndexDestinationAction(
    settings: Settings,
    controller: RestController,
    jobIndices: ScheduledJobIndices,
    clusterService: ClusterService
) : BaseRestHandler(settings) {
    private var scheduledJobIndices: ScheduledJobIndices
    // Kept current dynamically via the settings-update consumer registered below.
    @Volatile private var indexTimeout = INDEX_TIMEOUT.get(settings)

    init {
        controller.registerHandler(RestRequest.Method.POST, AlertingPlugin.DESTINATION_BASE_URI, this) // Creates new destination
        controller.registerHandler(RestRequest.Method.PUT, "${AlertingPlugin.DESTINATION_BASE_URI}/{destinationID}", this)
        scheduledJobIndices = jobIndices

        clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it }
    }

    override fun getName(): String {
        return "index_destination_action"
    }

    @Throws(IOException::class)
    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
        val id = request.param("destinationID", Destination.NO_ID)
        if (request.method() == RestRequest.Method.PUT && Destination.NO_ID == id) {
            throw IllegalArgumentException("Missing destination ID")
        }

        // Validate request by parsing JSON to Destination
        val xcp = request.contentParser()
        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
        val destination = Destination.parse(xcp, id)
        val destinationVersion = RestActions.parseVersion(request)
        val refreshPolicy = if (request.hasParam(REFRESH)) {
            WriteRequest.RefreshPolicy.parse(request.param(REFRESH))
        } else {
            WriteRequest.RefreshPolicy.IMMEDIATE
        }
        return RestChannelConsumer { channel ->
            IndexDestinationHandler(client, channel, id, destinationVersion, refreshPolicy, destination).start()
        }
    }

    /**
     * Async handler for a single create/update request. Lazily creates the scheduled jobs index
     * (with mappings) on first use, then indexes the destination document.
     */
    inner class IndexDestinationHandler(
        client: NodeClient,
        channel: RestChannel,
        private val destinationId: String,
        private val destinationVersion: Long,
        private val refreshPolicy: WriteRequest.RefreshPolicy,
        private var newDestination: Destination
    ) : AsyncActionHandler(client, channel) {

        fun start() {
            if (!scheduledJobIndices.scheduledJobIndexExists()) {
                scheduledJobIndices.initScheduledJobIndex(ActionListener.wrap(::onCreateMappingsResponse, ::onFailure))
            } else {
                prepareDestinationIndexing()
            }
        }

        /** PUT requests must target an existing document; POST indexes a brand-new one. */
        private fun prepareDestinationIndexing() {
            if (channel.request().method() == RestRequest.Method.PUT) updateDestination()
            else {
                val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE)
                    .setRefreshPolicy(refreshPolicy)
                    .source(newDestination.toXContent(channel.newBuilder(), ToXContent.MapParams(mapOf("with_type" to "true"))))
                    .version(destinationVersion)
                    .timeout(indexTimeout)
                client.index(indexRequest, indexDestinationResponse())
            }
        }

        private fun onCreateMappingsResponse(response: CreateIndexResponse) {
            if (response.isAcknowledged) {
                logger.info("Created ${ScheduledJob.SCHEDULED_JOBS_INDEX} with mappings.")
                prepareDestinationIndexing()
            } else {
                logger.error("Create ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.")
                channel.sendResponse(BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR,
                    response.toXContent(channel.newErrorBuilder(), ToXContent.EMPTY_PARAMS)))
            }
        }

        private fun updateDestination() {
            val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE, destinationId)
            client.get(getRequest, ActionListener.wrap(::onGetResponse, ::onFailure))
        }

        private fun onGetResponse(response: GetResponse) {
            if (!response.isExists) {
                val builder = channel.newErrorBuilder()
                    .startObject()
                    .field("Message", "Destination with $destinationId is not found")
                    .endObject()
                return channel.sendResponse(BytesRestResponse(RestStatus.NOT_FOUND, response.toXContent(builder, ToXContent.EMPTY_PARAMS)))
            }
            val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE)
                .setRefreshPolicy(refreshPolicy)
                .source(newDestination.toXContent(channel.newBuilder(), ToXContent.MapParams(mapOf("with_type" to "true"))))
                .id(destinationId)
                .version(destinationVersion)
                .timeout(indexTimeout)
            return client.index(indexRequest, indexDestinationResponse())
        }

        /** Renders the index response; surfaces shard failures, otherwise `{_id, _version, destination}`. */
        private fun indexDestinationResponse(): RestResponseListener<IndexResponse> {
            return object : RestResponseListener<IndexResponse>(channel) {
                @Throws(Exception::class)
                override fun buildResponse(response: IndexResponse): RestResponse {
                    val failureReasons = mutableListOf<String>()
                    if (response.shardInfo.failed > 0) {
                        response.shardInfo.failures.forEach { entry -> failureReasons.add(entry.reason()) }
                        val builder = channel.newErrorBuilder().startObject()
                            .field("reasons", failureReasons.toTypedArray())
                            .endObject()
                        return BytesRestResponse(response.status(), response.toXContent(builder, ToXContent.EMPTY_PARAMS))
                    }
                    val builder = channel.newBuilder()
                        .startObject()
                        .field(_ID, response.id)
                        .field(_VERSION, response.version)
                        .field("destination", newDestination)
                        .endObject()

                    val restResponse = BytesRestResponse(response.status(), builder)
                    if (response.status() == RestStatus.CREATED) {
                        // Include the path separator so the Location header is a valid resource URI
                        // (consistent with RestIndexMonitorAction).
                        val location = "${AlertingPlugin.DESTINATION_BASE_URI}/${response.id}"
                        restResponse.addHeader("Location", location)
                    }
                    return restResponse
                }
            }
        }
    }
}
/**
 * Rest handlers to create and update monitors.
 */
class RestIndexMonitorAction(
    settings: Settings,
    controller: RestController,
    jobIndices: ScheduledJobIndices,
    clusterService: ClusterService
) : BaseRestHandler(settings) {

    private var scheduledJobIndices: ScheduledJobIndices
    // Dynamic cluster settings; kept current via the update consumers registered below.
    @Volatile private var maxMonitors = ALERTING_MAX_MONITORS.get(settings)
    @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings)
    @Volatile private var indexTimeout = INDEX_TIMEOUT.get(settings)

    init {
        controller.registerHandler(POST, AlertingPlugin.MONITOR_BASE_URI, this) // Create a new monitor
        controller.registerHandler(PUT, "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", this)
        scheduledJobIndices = jobIndices

        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it }
        clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it }
        clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it }
    }

    override fun getName(): String {
        return "index_monitor_action"
    }

    @Throws(IOException::class)
    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
        val id = request.param("monitorID", Monitor.NO_ID)
        if (request.method() == PUT && Monitor.NO_ID == id) {
            throw IllegalArgumentException("Missing monitor ID")
        }

        // Validate request by parsing JSON to Monitor
        val xcp = request.contentParser()
        ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
        val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now())
        val monitorVersion = RestActions.parseVersion(request)
        val refreshPolicy = if (request.hasParam(REFRESH)) {
            WriteRequest.RefreshPolicy.parse(request.param(REFRESH))
        } else {
            WriteRequest.RefreshPolicy.IMMEDIATE
        }
        return RestChannelConsumer { channel ->
            IndexMonitorHandler(client, channel, id, monitorVersion, refreshPolicy, monitor).start()
        }
    }

    /**
     * Async handler for a single create/update request. Lazily creates the scheduled jobs index
     * (with mappings) on first use, enforces the monitor-count limit, then indexes the monitor.
     */
    inner class IndexMonitorHandler(
        client: NodeClient,
        channel: RestChannel,
        private val monitorId: String,
        private val monitorVersion: Long,
        private val refreshPolicy: WriteRequest.RefreshPolicy,
        private var newMonitor: Monitor
    ) : AsyncActionHandler(client, channel) {

        fun start() {
            if (!scheduledJobIndices.scheduledJobIndexExists()) {
                scheduledJobIndices.initScheduledJobIndex(ActionListener.wrap(::onCreateMappingsResponse, ::onFailure))
            } else {
                prepareMonitorIndexing()
            }
        }

        /**
         * This function prepares for indexing a new monitor.
         * If this is an update request we can simply update the monitor. Otherwise we first check to see how many
         * monitors already exist, and compare this to [maxMonitors]. Requests that breach this threshold are rejected.
         */
        private fun prepareMonitorIndexing() {
            if (channel.request().method() == PUT) return updateMonitor()
            val query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("${Monitor.MONITOR_TYPE}.type", Monitor.MONITOR_TYPE))
            val searchSource = SearchSourceBuilder().query(query).timeout(requestTimeout)
            val searchRequest = SearchRequest(ScheduledJob.SCHEDULED_JOBS_INDEX)
                .types(ScheduledJob.SCHEDULED_JOB_TYPE)
                .source(searchSource)
            client.search(searchRequest, ActionListener.wrap(::onSearchResponse, ::onFailure))
        }

        /**
         * After searching for all existing monitors we validate the system can support another monitor to be created.
         */
        private fun onSearchResponse(response: SearchResponse) {
            if (response.hits.totalHits >= maxMonitors) {
                logger.error("This request would create more than the allowed monitors [$maxMonitors].")
                onFailure(IllegalArgumentException("This request would create more than the allowed monitors [$maxMonitors]."))
            } else {
                indexMonitor()
            }
        }

        private fun onCreateMappingsResponse(response: CreateIndexResponse) {
            if (response.isAcknowledged) {
                logger.info("Created ${ScheduledJob.SCHEDULED_JOBS_INDEX} with mappings.")
                prepareMonitorIndexing()
            } else {
                logger.error("Create ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.")
                channel.sendResponse(BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR,
                    response.toXContent(channel.newErrorBuilder(), EMPTY_PARAMS)))
            }
        }

        private fun indexMonitor() {
            val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE)
                .setRefreshPolicy(refreshPolicy)
                .source(newMonitor.toXContentWithType(channel.newBuilder()))
                .version(monitorVersion)
                .timeout(indexTimeout)
            client.index(indexRequest, indexMonitorResponse())
        }

        private fun updateMonitor() {
            val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE, monitorId)
            client.get(getRequest, ActionListener.wrap(::onGetResponse, ::onFailure))
        }

        private fun onGetResponse(response: GetResponse) {
            if (!response.isExists) {
                val builder = channel.newErrorBuilder()
                    .startObject()
                    .field("Message", "Monitor with $monitorId is not found")
                    .endObject()
                return channel.sendResponse(BytesRestResponse(RestStatus.NOT_FOUND, response.toXContent(builder, EMPTY_PARAMS)))
            }
            // Close the parser when done (consistent with RestGetMonitorAction).
            val currentMonitor = ElasticAPI.INSTANCE
                .jsonParser(channel.request().xContentRegistry, response.sourceAsBytesRef)
                .use { xcp -> ScheduledJob.parse(xcp, monitorId) as Monitor }
            // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be
            // incorrect.
            if (newMonitor.enabled && currentMonitor.enabled) newMonitor = newMonitor.copy(enabledTime = currentMonitor.enabledTime)
            val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX, SCHEDULED_JOB_TYPE)
                .setRefreshPolicy(refreshPolicy)
                .source(newMonitor.toXContentWithType(channel.newBuilder()))
                .id(monitorId)
                .version(monitorVersion)
                .timeout(indexTimeout)
            return client.index(indexRequest, indexMonitorResponse())
        }

        /** Renders the index response; surfaces shard failures, otherwise `{_id, _version, monitor}`. */
        private fun indexMonitorResponse(): RestResponseListener<IndexResponse> {
            return object : RestResponseListener<IndexResponse>(channel) {
                @Throws(Exception::class)
                override fun buildResponse(response: IndexResponse): RestResponse {
                    if (response.shardInfo.successful < 1) {
                        return BytesRestResponse(response.status(), response.toXContent(channel.newErrorBuilder(), EMPTY_PARAMS))
                    }

                    val builder = channel.newBuilder()
                        .startObject()
                        .field(_ID, response.id)
                        .field(_VERSION, response.version)
                        .field("monitor", newMonitor)
                        .endObject()

                    val restResponse = BytesRestResponse(response.status(), builder)
                    if (response.status() == RestStatus.CREATED) {
                        val location = "${AlertingPlugin.MONITOR_BASE_URI}/${response.id}"
                        restResponse.addHeader("Location", location)
                    }
                    return restResponse
                }
            }
        }
    }
}
/**
 * Rest handlers to search for monitors.
 */
class RestSearchMonitorAction(settings: Settings, controller: RestController) : BaseRestHandler(settings) {
    init {
        // Search for monitors
        controller.registerHandler(POST, "${AlertingPlugin.MONITOR_BASE_URI}/_search", this)
        controller.registerHandler(GET, "${AlertingPlugin.MONITOR_BASE_URI}/_search", this)
    }

    override fun getName(): String {
        return "search_monitor_action"
    }

    @Throws(IOException::class)
    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
        val searchSourceBuilder = SearchSourceBuilder()
        searchSourceBuilder.parseXContent(request.contentOrSourceParamParser())
        searchSourceBuilder.fetchSource(context(request))
        // We add a term query on top of the user's query to ensure that only scheduled jobs of monitor type are
        // searched.
        searchSourceBuilder.query(QueryBuilders.boolQuery().must(searchSourceBuilder.query())
            .filter(QueryBuilders.termQuery(Monitor.MONITOR_TYPE + ".type", Monitor.MONITOR_TYPE)))
        val searchRequest = SearchRequest()
            .source(searchSourceBuilder)
            .indices(SCHEDULED_JOBS_INDEX)
            .types(SCHEDULED_JOB_TYPE)
        return RestChannelConsumer { channel -> client.search(searchRequest, searchMonitorResponse(channel)) }
    }

    /** Rewrites every hit's source through the Monitor model so only monitor fields are exposed to clients. */
    private fun searchMonitorResponse(channel: RestChannel): RestResponseListener<SearchResponse> {
        return object : RestResponseListener<SearchResponse>(channel) {
            @Throws(Exception::class)
            override fun buildResponse(response: SearchResponse): RestResponse {
                if (response.isTimedOut) {
                    return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString())
                }
                for (hit in response.hits) {
                    ElasticAPI.INSTANCE
                        .jsonParser(channel.request().xContentRegistry, hit.sourceAsString).use { hitsParser ->
                            val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version)
                            val xcb = monitor.toXContent(jsonBuilder(), EMPTY_PARAMS)
                            hit.sourceRef(ElasticAPI.INSTANCE.builderToBytesRef(xcb))
                        }
                }
                return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), EMPTY_PARAMS))
            }
        }
    }
}
/**
 * The context a [TriggerScript] executes against: the owning monitor, the trigger being evaluated,
 * the monitor's input results for the current execution period, and the current alert/error (if any).
 */
data class TriggerExecutionContext(
    val monitor: Monitor,
    val trigger: Trigger,
    val results: List<Map<String, Any>>,
    val periodStart: Instant,
    val periodEnd: Instant,
    val alert: Alert? = null,
    val error: Exception? = null
) {

    constructor(monitor: Monitor, trigger: Trigger, monitorRunResult: MonitorRunResult, alert: Alert? = null) :
            this(monitor, trigger, monitorRunResult.inputResults.results, monitorRunResult.periodStart,
                monitorRunResult.periodEnd, alert, monitorRunResult.scriptContextError(trigger))

    /**
     * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we
     * translate the context to a Map of Strings to primitive types, which can be accessed without reflection.
     */
    fun asTemplateArg(): Map<String, Any?> {
        return mapOf("monitor" to monitor.asTemplateArg(),
            "trigger" to trigger.asTemplateArg(),
            "results" to results,
            "periodStart" to periodStart,
            "periodEnd" to periodEnd,
            "alert" to alert?.asTemplateArg(),
            "error" to error)
    }
}
abstract class TriggerScript(_scriptParams: Map<String, Any>) {

    /**
     * [scriptParams] are the [user-defined parameters][Script.getParams] specified in the script definition.
     * The [scriptParams] are defined when the script is compiled and DON'T change every time the script executes. This field
     * is named **script**Params to avoid confusion with the [PARAMETERS] field. However to remain consistent with every other
     * painless script context we surface it to the painless script as just `params` using a custom getter name.
     */
    val scriptParams: Map<String, Any> = _scriptParams
        @JvmName("getParams") get

    companion object {
        /**
         * [PARAMETERS] contains the names of the formal arguments to the [execute] method which define the
         * script's execution context. These argument names (`_results` etc.) are available as named parameters
         * in the painless script. These arguments passed to the [execute] method change every time the trigger is executed.
         * In a sane world this would have been named `ARGUMENTS` to avoid confusing the hell out of everyone who has to
         * work with this code.
         */
        @JvmField val PARAMETERS = arrayOf("ctx")

        val CONTEXT = ScriptContext("trigger", Factory::class.java)
    }

    /**
     * Run a trigger script with the given context.
     *
     * @param ctx - the trigger execution context
     * @return true when the trigger condition fires
     */
    abstract fun execute(ctx: TriggerExecutionContext): Boolean

    interface Factory {
        fun newInstance(scriptParams: Map<String, Any>): TriggerScript
    }
}
/**
 * settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc...
 *
 * All settings are node-scoped and dynamic, so they can be changed at runtime via the cluster settings API.
 */
class AlertingSettings {

    companion object {
        // Hard caps on the number of inputs/triggers allowed per monitor definition.
        const val MONITOR_MAX_INPUTS = 1
        const val MONITOR_MAX_TRIGGERS = 10

        // Maximum number of monitors that may exist cluster-wide; enforced when creating a new monitor.
        val ALERTING_MAX_MONITORS = Setting.intSetting(
            "opendistro.alerting.monitor.max_monitors",
            1000,
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Timeout for running a monitor's input searches.
        val INPUT_TIMEOUT = Setting.positiveTimeSetting(
            "opendistro.alerting.input_timeout",
            TimeValue.timeValueSeconds(30),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Timeout for indexing monitor/destination documents.
        val INDEX_TIMEOUT = Setting.positiveTimeSetting(
            "opendistro.alerting.index_timeout",
            TimeValue.timeValueSeconds(60),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Timeout for bulk writes (e.g. saving alerts).
        val BULK_TIMEOUT = Setting.positiveTimeSetting(
            "opendistro.alerting.bulk_timeout",
            TimeValue.timeValueSeconds(120),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Backoff delay/count for retrying alert writes.
        val ALERT_BACKOFF_MILLIS = Setting.positiveTimeSetting(
            "opendistro.alerting.alert_backoff_millis",
            TimeValue.timeValueMillis(50),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        val ALERT_BACKOFF_COUNT = Setting.intSetting(
            "opendistro.alerting.alert_backoff_count",
            2,
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Backoff delay/count for retrying the move of completed alerts to the history index.
        val MOVE_ALERTS_BACKOFF_MILLIS = Setting.positiveTimeSetting(
            "opendistro.alerting.move_alerts_backoff_millis",
            TimeValue.timeValueMillis(250),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        val MOVE_ALERTS_BACKOFF_COUNT = Setting.intSetting(
            "opendistro.alerting.move_alerts_backoff_count",
            3,
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // How often the alert history index rollover check runs.
        val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting(
            "opendistro.alerting.alert_history_rollover_period",
            TimeValue.timeValueHours(12),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // Rollover conditions for the alert history index: max age and max document count.
        val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting(
            "opendistro.alerting.alert_history_max_age",
            TimeValue.timeValueHours(24),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        val ALERT_HISTORY_MAX_DOCS = Setting.longSetting(
            "opendistro.alerting.alert_history_max_docs",
            1000L,
            0L,
            Setting.Property.NodeScope, Setting.Property.Dynamic)
        // General timeout for rest-layer requests (e.g. the monitor-count search).
        val REQUEST_TIMEOUT = Setting.positiveTimeSetting(
            "opendistro.alerting.request_timeout",
            TimeValue.timeValueSeconds(10),
            Setting.Property.NodeScope, Setting.Property.Dynamic)
    }
}
/**
 * The supported destination types. [value] is the serialized type name used in destination
 * JSON documents and REST payloads.
 */
enum class DestinationType(val value: String) {
    CHIME("chime"),
    SLACK("slack"),
    CUSTOM_WEBHOOK("custom_webhook"),
    // NOTE(review): presumably a stub type for exercising the action path without a real endpoint — confirm
    TEST_ACTION("test_action")
}
{ + val userAgent = Strings.coalesceToEmpty(request.header("User-Agent")) + return if (!userAgent.contains(AlertingPlugin.KIBANA_USER_AGENT)) { + FetchSourceContext(true, Strings.EMPTY_ARRAY, AlertingPlugin.UI_METADATA_EXCLUDE) + } else null +} + +const val _ID = "_id" +const val _VERSION = "_version" +const val REFRESH = "refresh" diff --git a/alerting/src/main/plugin-metadata/plugin-security.policy b/alerting/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 00000000..bcee5e9e --- /dev/null +++ b/alerting/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,8 @@ +grant { + // needed to find the classloader to load whitelisted classes. + permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; + + permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "getProxySelector"; +}; diff --git a/alerting/src/main/resources/DUMMY-FILE b/alerting/src/main/resources/DUMMY-FILE new file mode 100644 index 00000000..74623997 --- /dev/null +++ b/alerting/src/main/resources/DUMMY-FILE @@ -0,0 +1 @@ +THIS IS A DUMMY FILE \ No newline at end of file diff --git a/alerting/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension b/alerting/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension new file mode 100644 index 00000000..b1f6fad8 --- /dev/null +++ b/alerting/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension @@ -0,0 +1,16 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. 
This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin \ No newline at end of file diff --git a/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/alerts/alert_mapping.json b/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/alerts/alert_mapping.json new file mode 100644 index 00000000..8b9a9a2f --- /dev/null +++ b/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/alerts/alert_mapping.json @@ -0,0 +1,69 @@ +{ + "_doc": { + "dynamic": "strict", + "_routing": { + "required" : true + }, + "properties" : { + "monitor_id" : { + "type" : "keyword" + }, + "monitor_version" : { + "type" : "long" + }, + "severity": { + "type": "keyword" + }, + "monitor_name" : { + "type" : "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above" : 256 + } + } + }, + "trigger_id" : { + "type" : "keyword" + }, + "trigger_name" : { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above" : 256 + } + } + }, + "state" : { + "type": "keyword" + }, + "start_time" : { + "type" : "date" + }, + "last_notification_time" : { + "type" : "date" + }, + "acknowledged_time" : { + "type" : "date" + }, + "end_time" : { + "type" : "date" + }, + "error_message" : { + "type" : "text" + }, + "alert_history": { + "type": "nested", + "properties" : { + "timestamp": { + "type": "date" + }, + "message": { + "type": "text" + } + } + } + } + } +} \ No newline at end of file diff --git a/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/com.amazon.opendistroforelasticsearch.alerting.txt b/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/com.amazon.opendistroforelasticsearch.alerting.txt new file mode 100644 index 
00000000..b548ad53 --- /dev/null +++ b/alerting/src/main/resources/com/amazon/opendistroforelasticsearch/alerting/com.amazon.opendistroforelasticsearch.alerting.txt @@ -0,0 +1,47 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Painless definition of classes used by alerting plugin + +class com.amazon.opendistroforelasticsearch.alerting.script.TriggerScript { + Map getParams() + boolean execute(TriggerExecutionContext) + String[] PARAMETERS +} + +class com.amazon.opendistroforelasticsearch.alerting.script.TriggerScript$Factory { + TriggerScript newInstance(Map) +} + +class com.amazon.opendistroforelasticsearch.alerting.script.TriggerExecutionContext { + Monitor getMonitor() + Trigger getTrigger() + List getResults() + java.time.Instant getPeriodStart() + java.time.Instant getPeriodEnd() + Alert getAlert() + Exception getError() +} + +class com.amazon.opendistroforelasticsearch.alerting.model.Monitor { + String getId() + long getVersion() + String getName() + boolean getEnabled() +} + +class com.amazon.opendistroforelasticsearch.alerting.model.Trigger { + String getId() + String getName() + String getSeverity() + List getActions() +} + +class com.amazon.opendistroforelasticsearch.alerting.model.action.Action { + String getName() +} + +class com.amazon.opendistroforelasticsearch.alerting.model.Alert { + String getId() + long getVersion() + boolean isAcknowledged() +} \ No newline at end of file diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingRestTestCase.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingRestTestCase.kt new file mode 100644 index 00000000..80233106 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/AlertingRestTestCase.kt @@ -0,0 +1,339 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Destination +import com.amazon.opendistroforelasticsearch.alerting.test.makeRequest +import com.amazon.opendistroforelasticsearch.alerting.util.DestinationType +import org.apache.http.HttpEntity +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.entity.ContentType.APPLICATION_JSON +import org.apache.http.entity.StringEntity +import org.apache.http.message.BasicHeader +import org.elasticsearch.action.search.SearchResponse +import org.elasticsearch.client.Response +import org.elasticsearch.client.RestClient +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentFactory +import 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.common.xcontent.json.JsonXContent +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.search.SearchModule +import org.elasticsearch.test.rest.ESRestTestCase +import org.junit.rules.DisableOnDebug +import java.net.URLEncoder +import java.time.Instant +import java.util.Locale + +abstract class AlertingRestTestCase : ESRestTestCase() { + + private val isDebuggingTest = DisableOnDebug(null).isDebugging + private val isDebuggingRemoteCluster = System.getProperty("cluster.debug", "false")!!.toBoolean() + + override fun xContentRegistry(): NamedXContentRegistry { + return NamedXContentRegistry(mutableListOf(Monitor.XCONTENT_REGISTRY, + SearchInput.XCONTENT_REGISTRY) + + SearchModule(Settings.EMPTY, false, emptyList()).namedXContents) + } + + fun Response.asMap(): Map { + return entityAsMap(this) + } + + protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor { + val response = client().makeRequest("POST", "$ALERTING_BASE_URI?refresh=$refresh", emptyMap(), + monitor.toHttpEntity()) + assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) + + val monitorJson = ElasticAPI.INSTANCE.jsonParser(NamedXContentRegistry.EMPTY, response.entity.content).map() + return monitor.copy(id = monitorJson["_id"] as String, version = (monitorJson["_version"] as Int).toLong()) + } + + protected fun createDestination(destination: Destination = getTestDestination(), refresh: Boolean = true): Destination { + val response = client().makeRequest( + "POST", + "$DESTINATION_BASE_URI?refresh=$refresh", + emptyMap(), + destination.toHttpEntity()) + assertEquals("Unable to create a new destination", RestStatus.CREATED, response.restStatus()) + val destinationJson = 
ElasticAPI.INSTANCE.jsonParser(NamedXContentRegistry.EMPTY, response.entity.content).map() + return destination.copy(id = destinationJson["_id"] as String, version = (destinationJson["_version"] as Int).toLong()) + } + + protected fun updateDestination(destination: Destination, refresh: Boolean = true): Destination { + val response = client().makeRequest( + "PUT", + "$DESTINATION_BASE_URI/${destination.id}?refresh=$refresh", + emptyMap(), + destination.toHttpEntity()) + assertEquals("Unable to update a destination", RestStatus.OK, response.restStatus()) + val destinationJson = ElasticAPI.INSTANCE.jsonParser(NamedXContentRegistry.EMPTY, response.entity.content).map() + return destination.copy(id = destinationJson["_id"] as String, version = (destinationJson["_version"] as Int).toLong()) + } + + private fun getTestDestination(): Destination { + return Destination( + type = DestinationType.TEST_ACTION, + name = "test", + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = null) + } + + protected fun createAlert(alert: Alert): Alert { + val response = client().makeRequest("POST", "/${AlertIndices.ALERT_INDEX}/_doc?refresh=true&routing=${alert.monitorId}", + emptyMap(), alert.toHttpEntity()) + assertEquals("Unable to create a new alert", RestStatus.CREATED, response.restStatus()) + + val alertJson = ElasticAPI.INSTANCE.jsonParser(NamedXContentRegistry.EMPTY, response.entity.content).map() + return alert.copy(id = alertJson["_id"] as String, version = (alertJson["_version"] as Int).toLong()) + } + + protected fun createRandomMonitor(refresh: Boolean = false, withMetadata: Boolean = false): Monitor { + val monitor = randomMonitor(withMetadata = withMetadata) + val monitorId = createMonitor(monitor, refresh).id + if (withMetadata) { + return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "Kibana")) + } + return getMonitor(monitorId = monitorId) + } + + protected fun updateMonitor(monitor: Monitor, refresh: 
Boolean = false): Monitor { + val response = client().makeRequest("PUT", "${monitor.relativeUrl()}?refresh=$refresh", + emptyMap(), monitor.toHttpEntity()) + assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) + return getMonitor(monitorId = monitor.id) + } + + protected fun getMonitor(monitorId: String, header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")): Monitor { + val response = client().makeRequest("GET", "$ALERTING_BASE_URI/$monitorId", null, header) + assertEquals("Unable to get monitor $monitorId", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation) + + lateinit var id: String + var version: Long = 0 + lateinit var monitor: Monitor + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "_id" -> id = parser.text() + "_version" -> version = parser.longValue() + "monitor" -> monitor = Monitor.parse(parser) + } + } + return monitor.copy(id = id, version = version) + } + + protected fun searchAlerts(monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, refresh: Boolean = true): List { + if (refresh) refreshIndex(indices) + + // If this is a test monitor (it doesn't have an ID) and no alerts will be saved for it. 
+ val searchParams = if (monitor.id != Monitor.NO_ID) mapOf("routing" to monitor.id) else mapOf() + val request = """ + { "version" : true, + "query" : { "term" : { "${Alert.MONITOR_ID_FIELD}" : "${monitor.id}" } } + } + """.trimIndent() + val httpResponse = client().makeRequest("GET", "/$indices/_search", searchParams, StringEntity(request, APPLICATION_JSON)) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + + val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + return searchResponse.hits.hits.map { + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + Alert.parse(xcp, it.id, it.version) + } + } + + protected fun acknowledgeAlerts(monitor: Monitor, vararg alerts: Alert): Response { + val request = XContentFactory.jsonBuilder().startObject() + .array("alerts", *alerts.map { it.id }.toTypedArray()) + .endObject() + .string() + .let { StringEntity(it, APPLICATION_JSON) } + + val response = client().makeRequest("POST", "${monitor.relativeUrl()}/_acknowledge/alerts?refresh=true", + emptyMap(), request) + assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) + return response + } + + protected fun refreshIndex(index: String): Response { + val response = client().makeRequest("POST", "/$index/_refresh") + assertEquals("Unable to refresh index", RestStatus.OK, response.restStatus()) + return response + } + + protected fun deleteIndex(index: String): Response { + val response = adminClient().makeRequest("DELETE", "/$index") + assertEquals("Unable to delete index", RestStatus.OK, response.restStatus()) + return response + } + + protected fun executeMonitor(monitorId: String, params: Map = mutableMapOf()): Response { + return client().makeRequest("POST", "$ALERTING_BASE_URI/$monitorId/_execute", params) + } + + protected fun executeMonitor(monitor: Monitor, params: Map = mapOf()): Response = + client().makeRequest("POST", 
"$ALERTING_BASE_URI/_execute", params, monitor.toHttpEntity()) + + protected fun indexDoc(index: String, id: String, doc: String, refresh: Boolean = true): Response { + val requestBody = StringEntity(doc, APPLICATION_JSON) + val params = if (refresh) mapOf("refresh" to "true") else mapOf() + val response = client().makeRequest("PUT", "$index/_doc/$id", params, requestBody) + assertTrue("Unable to index doc: '${doc.take(15)}...' to index: '$index'", + listOf(RestStatus.OK, RestStatus.CREATED).contains(response.restStatus())) + return response + } + + /** A test index that can be used across tests. Feel free to add new fields but don't remove any. */ + protected fun createTestIndex(index: String = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)): String { + createIndex(index, Settings.EMPTY, """ + "_doc" : { + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" } + } + } + """.trimIndent()) + return index + } + + fun putAlertMappings() { + val mappingHack = AlertIndices.alertMapping().trimStart('{').trimEnd('}') + val encodedHistoryIndex = URLEncoder.encode(AlertIndices.HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) + createIndex(AlertIndices.ALERT_INDEX, Settings.EMPTY, mappingHack) + createIndex(encodedHistoryIndex, Settings.EMPTY, mappingHack) + client().makeRequest("PUT", "/$encodedHistoryIndex/_alias/${AlertIndices.HISTORY_WRITE_INDEX}") + } + + protected fun Response.restStatus(): RestStatus { + return RestStatus.fromCode(this.statusLine.statusCode) + } + + protected fun Monitor.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + private fun Monitor.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return shuffleXContent(toXContent(builder)).string() + } + + private fun Destination.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + private fun Destination.toJsonString(): String { + val builder = 
XContentFactory.jsonBuilder() + return shuffleXContent(toXContent(builder)).string() + } + + private fun Alert.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + private fun Alert.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string() + } + + protected fun Monitor.relativeUrl() = "$ALERTING_BASE_URI/$id" + + // Useful settings when debugging to prevent timeouts + override fun restClientSettings(): Settings { + return if (isDebuggingTest || isDebuggingRemoteCluster) { + Settings.builder() + .put(CLIENT_RETRY_TIMEOUT, TimeValue.timeValueMinutes(10)) + .put(CLIENT_SOCKET_TIMEOUT, TimeValue.timeValueMinutes(10)) + .build() + } else { + super.restClientSettings() + } + } + + fun RestClient.getClusterSettings(settings: Map): Map { + val response = this.makeRequest("GET", "_cluster/settings", settings) + assertEquals(RestStatus.OK, response.restStatus()) + return response.asMap() + } + + fun RestClient.updateSettings(setting: String, value: Any): Map { + val settings = jsonBuilder() + .startObject() + .startObject("persistent") + .field(setting, value) + .endObject() + .endObject() + .string() + val response = this.makeRequest("PUT", "_cluster/settings", StringEntity(settings, APPLICATION_JSON)) + assertEquals(RestStatus.OK, response.restStatus()) + return response.asMap() + } + + @Suppress("UNCHECKED_CAST") + fun Map.opendistroSettings(): Map? { + val map = this as Map>>> + return map["defaults"]?.get("opendistro")?.get("alerting") + } + + @Suppress("UNCHECKED_CAST") + fun Map.stringMap(key: String): Map? 
{ + val map = this as Map> + return map[key] + } + + fun getAlertingStats(metrics: String = ""): Map { + val monitorStatsResponse = client().makeRequest("GET", "/_opendistro/_alerting/stats$metrics") + val responseMap = createParser(XContentType.JSON.xContent(), monitorStatsResponse.entity.content).map() + return responseMap + } + + fun enableScheduledJob(): Response { + val updateResponse = client().makeRequest("PUT", "_cluster/settings", + emptyMap(), + StringEntity(XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, true).endObject() + .endObject().string(), ContentType.APPLICATION_JSON)) + return updateResponse + } + + fun disableScheduledJob(): Response { + val updateResponse = client().makeRequest("PUT", "_cluster/settings", + emptyMap(), + StringEntity(XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, false).endObject() + .endObject().string(), ContentType.APPLICATION_JSON)) + return updateResponse + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunnerIT.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunnerIT.kt new file mode 100644 index 00000000..9febaed7 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorRunnerIT.kt @@ -0,0 +1,540 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertError +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACKNOWLEDGED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ACTIVE +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.COMPLETED +import com.amazon.opendistroforelasticsearch.alerting.model.Alert.State.ERROR +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.test.makeRequest +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.script.Script +import org.elasticsearch.search.builder.SearchSourceBuilder +import java.time.Instant +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit.DAYS +import java.time.temporal.ChronoUnit.MILLIS + +class MonitorRunnerIT : AlertingRestTestCase() { + + fun `test execute monitor with dryrun`() { + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN, actions = listOf(action)))) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in 
triggerResult.objectMap("action_results").values) { + @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map + assertEquals("Hello ${monitor.name}", actionOutput["subject"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test execute monitor returns search result`() { + val uniqueName = "unique name" + val query = QueryBuilders.termQuery("monitor.name.keyword", uniqueName) + val input = SearchInput(indices = listOf("*"), query = SearchSourceBuilder().query(query)) + val monitor = createMonitor(randomMonitor(name = uniqueName, inputs = listOf(input), + triggers = listOf(randomTrigger(condition = ALWAYS_RUN)))) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val total = searchResult.stringMap("hits")?.get("total") as Int + assertEquals("Incorrect search result", 1, total) + } + + fun `test execute monitor not triggered`() { + val monitor = randomMonitor(triggers = listOf(randomTrigger(condition = NEVER_RUN))) + + val response = executeMonitor(monitor) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + assertTrue("Unexpected trigger was run", triggerResult.objectMap("action_results").isEmpty()) + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test active alert is updated on each run`() { + val monitor = createMonitor( + randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id)))) + + executeMonitor(monitor.id) + val 
firstRunAlert = searchAlerts(monitor).single() + verifyAlert(firstRunAlert, monitor) + // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to + // see lastNotificationTime change. + Thread.sleep(200) + executeMonitor(monitor.id) + val secondRunAlert = searchAlerts(monitor).single() + verifyAlert(secondRunAlert, monitor) + + assertEquals("New alert was created, instead of updating existing alert.", firstRunAlert.id, secondRunAlert.id) + assertEquals("Start time shouldn't change", firstRunAlert.startTime, secondRunAlert.startTime) + assertNotEquals("Last notification should be different.", + firstRunAlert.lastNotificationTime, secondRunAlert.lastNotificationTime) + } + + fun `test execute monitor input error`() { + // use a non-existent index to trigger an input error + val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + val monitor = createMonitor(randomMonitor(inputs = listOf(input), + triggers = listOf(randomTrigger(condition = NEVER_RUN)))) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val inputResults = output.stringMap("input_results") + assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ERROR) + } + + fun `test acknowledged alert does not suppress subsequent errors`() { + val destinationId = createDestination().id + + createIndex("foo", Settings.EMPTY) + val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + val monitor = createMonitor( + randomMonitor(inputs = listOf(input), + triggers = listOf(randomTrigger(condition = ALWAYS_RUN, destinationId = destinationId)))) + + var response = 
executeMonitor(monitor.id) + + var output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty()) + val activeAlert = searchAlerts(monitor).single() + verifyAlert(activeAlert, monitor) + + // Now acknowledge the alert and delete the index to cause the next run of the monitor to fail... + acknowledgeAlerts(monitor, activeAlert) + deleteIndex("foo") + response = executeMonitor(monitor.id) + + output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + val errorAlert = searchAlerts(monitor).single { it.state == ERROR } + verifyAlert(errorAlert, monitor, ERROR) + } + + fun `test acknowledged alert is not updated unnecessarily`() { + val monitor = createMonitor( + randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id)))) + executeMonitor(monitor.id) + acknowledgeAlerts(monitor, searchAlerts(monitor).single()) + val acknowledgedAlert = searchAlerts(monitor).single() + verifyAlert(acknowledgedAlert, monitor, ACKNOWLEDGED) + + // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to + // let lastNotificationTime change. W/o this sleep the test can result in a false negative. 
+ Thread.sleep(200) + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + val currentAlert = searchAlerts(monitor).single() + assertEquals("Acknowledged alert was updated when nothing changed", currentAlert, acknowledgedAlert) + for (triggerResult in output.objectMap("trigger_results").values) { + assertTrue("Action run when alert is acknowledged.", triggerResult.objectMap("action_results").isEmpty()) + } + } + + fun `test alert completion`() { + val trigger = randomTrigger(condition = Script("ctx.alert == null"), destinationId = createDestination().id) + val monitor = createMonitor(randomMonitor(triggers = listOf(trigger))) + + executeMonitor(monitor.id) + val activeAlert = searchAlerts(monitor).single() + verifyAlert(activeAlert, monitor) + + executeMonitor(monitor.id) + assertTrue("There's still an active alert", searchAlerts(monitor, AlertIndices.ALERT_INDEX).isEmpty()) + val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single() + verifyAlert(completedAlert, monitor, COMPLETED) + } + + fun `test execute monitor script error`() { + // This painless script should cause a syntax error + val trigger = randomTrigger(condition = Script("foo bar baz")) + val monitor = randomMonitor(triggers = listOf(trigger)) + + val response = executeMonitor(monitor) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + assertTrue("Missing trigger error message", (triggerResult["error"] as String).isNotEmpty()) + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test execute action template error`() { + // Intentional syntax error in mustache template + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) + val monitor = randomMonitor(triggers = 
listOf(randomTrigger(condition = ALWAYS_RUN, actions = listOf(action)))) + + val response = executeMonitor(monitor) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in triggerResult.objectMap("action_results").values) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test execute monitor search with period`() { + val query = QueryBuilders.rangeQuery("monitor.last_update_time").gte("{{period_start}}").lte("{{period_end}}") + val input = SearchInput(indices = listOf("_all"), query = SearchSourceBuilder().query(query)) + val triggerScript = """ + // make sure there is at least one monitor + return ctx.results[0].hits.hits.size() > 0 + """.trimIndent() + val destinationId = createDestination().id + val trigger = randomTrigger(condition = Script(triggerScript), destinationId = destinationId) + val monitor = createMonitor(randomMonitor(inputs = listOf(input), triggers = listOf(trigger))) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id) + assertEquals(true, triggerResult["triggered"].toString().toBoolean()) + assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty()) + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor) + } + + fun `test execute monitor search with period date math`() { + val testIndex = createTestIndex() + val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo) + val testDoc = """{ 
"test_strict_date_time" : "$testTime" }""" + indexDoc(testIndex, "1", testDoc) + + // Queries that use period_start/end should expect these values to always be formatted as 'epoch_millis'. Either + // the query should specify the format (like below) or the mapping for the index/field being queried should allow + // epoch_millis as an alternative (ES's default mapping for date fields "strict_date_optional_time||epoch_millis") + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query)) + val triggerScript = """ + // make sure there is exactly one hit + return ctx.results[0].hits.hits.size() == 1 + """.trimIndent() + val trigger = randomTrigger(condition = Script(triggerScript)) + val monitor = randomMonitor(inputs = listOf(input), triggers = listOf(trigger)) + + val response = executeMonitor(monitor) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id) + assertEquals(true, triggerResult["triggered"].toString().toBoolean()) + assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty()) + assertNotEquals("period incorrect", output["period_start"], output["period_end"]) + + // Don't expect any alerts for this monitor as it has not been saved + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test monitor with one bad action and one good action`() { + val goodAction = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val syntaxErrorAction = randomAction( + name = "bad syntax", + template = randomTemplateScript("{{foo"), + destinationId = createDestination().id) + val actions = listOf(goodAction, 
syntaxErrorAction) + val monitor = createMonitor(randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN, actions = actions)))) + + val output = entityAsMap(executeMonitor(monitor.id)) + + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in triggerResult.objectMap("action_results").values) { + @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map + if (actionResult["name"] == goodAction.name) { + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } else if (actionResult["name"] == syntaxErrorAction.name) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } else { + fail("Unknown action: ${actionResult["name"]}") + } + } + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ERROR) + } + + fun `test execute monitor adds to alert error history`() { + putAlertMappings() // Required as we do not have a create alert API. 
+ // This template script has a parsing error to purposefully create an errorMessage during runMonitor + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) + val trigger = randomTrigger(condition = ALWAYS_RUN, actions = listOf(action)) + val monitor = createMonitor(randomMonitor(triggers = listOf(trigger))) + val listOfFiveErrorMessages = (1..5).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") } + val activeAlert = createAlert(randomAlert(monitor).copy(state = ACTIVE, errorHistory = listOfFiveErrorMessages, + triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity)) + + val response = executeMonitor(monitor.id) + + val updatedAlert = searchAlerts(monitor).single() + assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id) + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in triggerResult.objectMap("action_results").values) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } + } + assertEquals("Wrong number of error messages in history", 6, updatedAlert.errorHistory.size) + } + + fun `test latest error is not lost when alert is completed`() { + // Creates an active alert the first time it's run and completes it the second time the monitor is run. 
+        val trigger = randomTrigger(condition = Script("""
+            if (ctx.alert == null) {
+                throw new RuntimeException("foo");
+            } else {
+                return false;
+            }
+        """.trimIndent()))
+        val monitor = createMonitor(randomMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+        val errorAlert = searchAlerts(monitor).single()
+        verifyAlert(errorAlert, monitor, ERROR)
+        executeMonitor(monitor.id)
+        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single()
+        verifyAlert(completedAlert, monitor, COMPLETED)
+
+        assertNull("Completed alert still has error message.", completedAlert.errorMessage)
+        assertTrue("Missing error history.", completedAlert.errorHistory.isNotEmpty())
+        val latestError = completedAlert.errorHistory.single().message
+        assertTrue("Latest error is missing from history.", latestError.contains("RuntimeException(\"foo\")"))
+    }
+
+    fun `test throw script exception`() {
+        // The invalid trigger script should surface as an error on the alert instead of crashing the monitor run.
+        val trigger = randomTrigger(condition = Script("""
+            param[0]; return true
+        """.trimIndent()))
+        val monitor = createMonitor(randomMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+        val errorAlert = searchAlerts(monitor).single()
+        verifyAlert(errorAlert, monitor, ERROR)
+        executeMonitor(monitor.id)
+        assertEquals("Error does not match",
+                "Error evaluating trigger:\nparam[0]; return true\n^---- HERE", errorAlert.errorMessage)
+    }
+
+    fun `test execute monitor limits alert error history to 10 error messages`() {
+        putAlertMappings() // Required as we do not have a create alert API.
+ // This template script has a parsing error to purposefully create an errorMessage during runMonitor + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) + val trigger = randomTrigger(condition = ALWAYS_RUN, actions = listOf(action)) + val monitor = createMonitor(randomMonitor(triggers = listOf(trigger))) + val listOfTenErrorMessages = (1..10).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") } + val activeAlert = createAlert(randomAlert(monitor).copy(state = ACTIVE, errorHistory = listOfTenErrorMessages, + triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity)) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in triggerResult.objectMap("action_results").values) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } + } + val updatedAlert = searchAlerts(monitor).single() + assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id) + assertEquals("Wrong number of error messages in history", 10, updatedAlert.errorHistory.size) + } + + fun `test execute monitor creates alert for trigger with no actions`() { + putAlertMappings() // Required as we do not have a create alert API. 
+ + val trigger = randomTrigger(condition = ALWAYS_RUN, actions = emptyList(), destinationId = createDestination().id) + val monitor = createMonitor(randomMonitor(triggers = listOf(trigger))) + + executeMonitor(monitor.id) + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ACTIVE) + } + + fun `test execute monitor with bad search`() { + val query = QueryBuilders.matchAllQuery() + val input = SearchInput(indices = listOf("_#*IllegalIndexCharacters"), query = SearchSourceBuilder().query(query)) + val monitor = createMonitor(randomMonitor(inputs = listOf(input), triggers = listOf(randomTrigger(condition = ALWAYS_RUN)))) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val inputResults = output.stringMap("input_results") + assertTrue("Missing error message from a bad query", (inputResults?.get("error") as String).isNotEmpty()) + } + + fun `test execute monitor non-dryrun`() { + val monitor = createMonitor( + randomMonitor(triggers = listOf(randomTrigger( + condition = ALWAYS_RUN, + actions = listOf(randomAction(destinationId = createDestination().id)))))) + + val response = executeMonitor(monitor.id, mapOf("dryrun" to "false")) + + assertEquals("failed dryrun", RestStatus.OK, response.restStatus()) + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ACTIVE) + } + + fun `test execute monitor with already active alert`() { + val monitor = createMonitor( + randomMonitor(triggers = listOf(randomTrigger( + condition = ALWAYS_RUN, + actions = listOf(randomAction(destinationId = createDestination().id)))))) + + val firstExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false")) + + assertEquals("failed dryrun", RestStatus.OK, firstExecuteResponse.restStatus()) + val alerts = 
searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ACTIVE)
+
+        val secondExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false"))
+
+        assertEquals("Second monitor execution failed", RestStatus.OK, secondExecuteResponse.restStatus())
+        val newAlerts = searchAlerts(monitor)
+        assertEquals("Second alert not saved", 1, newAlerts.size)
+        verifyAlert(newAlerts.single(), monitor, ACTIVE)
+    }
+
+    fun `test delete monitor with no alerts after alert indices is initialized`() {
+        putAlertMappings()
+
+        val newMonitor = createMonitor(
+                randomMonitor(triggers = listOf(randomTrigger(condition = NEVER_RUN, actions = listOf(randomAction())))))
+        val deleteNewMonitorResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${newMonitor.id}")
+
+        assertEquals("Delete request not successful", RestStatus.OK, deleteNewMonitorResponse.restStatus())
+    }
+
+    fun `test update monitor stays on schedule`() {
+        val monitor = createMonitor(randomMonitor(enabled = true))
+
+        updateMonitor(monitor.copy(enabledTime = Instant.now()))
+
+        val retrievedMonitor = getMonitor(monitorId = monitor.id)
+        assertEquals("Monitor enabled time changed.", monitor.enabledTime, retrievedMonitor.enabledTime)
+    }
+
+    fun `test enabled time by disabling and re-enabling monitor`() {
+        val monitor = createMonitor(randomMonitor(enabled = true))
+        assertNotNull("Enabled time is null on an enabled monitor.", getMonitor(monitor.id).enabledTime)
+
+        val disabledMonitor = updateMonitor(randomMonitor(enabled = false).copy(id = monitor.id))
+        assertNull("Enabled time is not null on a disabled monitor.", disabledMonitor.enabledTime)
+
+        val enabledMonitor = updateMonitor(randomMonitor(enabled = true).copy(id = monitor.id))
+        assertNotNull("Enabled time is null on an enabled monitor.", enabledMonitor.enabledTime)
+    }
+
+    fun `test enabled time by providing enabled time`() {
+        val enabledTime = Instant.ofEpochSecond(1538164858L) // This is 2018-09-28 20:00:58 GMT
+
val monitor = createMonitor(randomMonitor(enabled = true, enabledTime = enabledTime)) + + val retrievedMonitor = getMonitor(monitorId = monitor.id) + assertTrue("Monitor is not enabled", retrievedMonitor.enabled) + assertEquals("Enabled times do not match", monitor.enabledTime, retrievedMonitor.enabledTime) + } + + private fun verifyAlert(alert: Alert, monitor: Monitor, expectedState: Alert.State = ACTIVE) { + assertNotNull(alert.id) + assertNotNull(alert.startTime) + assertNotNull(alert.lastNotificationTime) + assertEquals("Alert in wrong state", expectedState, alert.state) + if (expectedState == ERROR) { + assertNotNull("Missing error message", alert.errorMessage) + } else { + assertNull("Unexpected error message", alert.errorMessage) + } + if (expectedState == COMPLETED) { + assertNotNull("End time missing for completed alert.", alert.endTime) + } else { + assertNull("End time set for active alert", alert.endTime) + } + assertEquals(monitor.id, alert.monitorId) + assertEquals(monitor.name, alert.monitorName) + assertEquals(monitor.version, alert.monitorVersion) + + // assert trigger exists for alert + val trigger = monitor.triggers.single { it.id == alert.triggerId } + assertEquals(trigger.name, alert.triggerName) + } + + @Suppress("UNCHECKED_CAST") + /** helper that returns a field in a json map whose values are all json objects */ + private fun Map.objectMap(key: String): Map> { + return this[key] as Map> + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorTests.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorTests.kt new file mode 100644 index 00000000..1ee7744d --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/MonitorTests.kt @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.model.Trigger +import org.elasticsearch.test.ESTestCase +import java.lang.IllegalArgumentException +import java.time.Instant + +class MonitorTests : ESTestCase() { + + fun `test enabled time`() { + val monitor = randomMonitor() + val enabledMonitor = monitor.copy(enabled = true, enabledTime = Instant.now()) + try { + enabledMonitor.copy(enabled = false) + fail("Disabling monitor with enabled time set should fail.") + } catch (e: IllegalArgumentException) { + } + + val disabledMonitor = monitor.copy(enabled = false, enabledTime = null) + + try { + disabledMonitor.copy(enabled = true) + fail("Enabling monitor without enabled time should fail") + } catch (e: IllegalArgumentException) { + } + } + + fun `test max triggers`() { + val monitor = randomMonitor() + + val tooManyTriggers = mutableListOf() + for (i in 0..10) { + tooManyTriggers.add(randomTrigger()) + } + + try { + monitor.copy(triggers = tooManyTriggers) + fail("Monitor with too many triggers should be rejected.") + } catch (e: IllegalArgumentException) { + } + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/TestHelpers.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/TestHelpers.kt new file mode 100644 index 00000000..e1d7ce75 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/TestHelpers.kt @@ -0,0 +1,96 @@ +/* + * Copyright 2019 Amazon.com, 
Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazon.opendistroforelasticsearch.alerting + +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.model.Trigger +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action +import com.amazon.opendistroforelasticsearch.alerting.core.model.Input +import com.amazon.opendistroforelasticsearch.alerting.core.model.IntervalSchedule +import com.amazon.opendistroforelasticsearch.alerting.core.model.Schedule +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import org.elasticsearch.common.UUIDs +import org.elasticsearch.common.xcontent.XContentFactory +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.script.Script +import org.elasticsearch.script.ScriptType +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.test.ESTestCase +import org.elasticsearch.test.ESTestCase.randomInt +import org.elasticsearch.test.rest.ESRestTestCase +import java.time.Instant +import java.time.temporal.ChronoUnit + +fun randomMonitor( + name: String = ESRestTestCase.randomAlphaOfLength(10), + inputs: List = listOf(SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), + schedule: 
Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = ESTestCase.randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor(name = name, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, + enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf()) +} + +fun randomTrigger( + id: String = UUIDs.base64UUID(), + name: String = ESRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): Trigger { + return Trigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty()) (0..randomInt(10)).map { randomAction(destinationId = destinationId) } else actions) +} + +fun randomScript(source: String = "return " + ESRestTestCase.randomBoolean().toString()): Script = Script(source) + +val ALERTING_BASE_URI = "/_opendistro/_alerting/monitors" +val DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" +val ALWAYS_RUN = Script("return true") +val NEVER_RUN = Script("return false") +val DRYRUN_MONITOR = mapOf("dryrun" to "true") + +fun randomTemplateScript( + source: String, + params: Map = emptyMap() +): Script = Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, source, params) + +fun randomAction( + name: String = ESRestTestCase.randomUnicodeOfLength(10), + template: Script = randomTemplateScript("Hello World"), + destinationId: String = "123" +) = Action(name, destinationId, template, template) + +fun randomAlert(monitor: Monitor = randomMonitor()): Alert { + val trigger = randomTrigger() + return Alert(monitor, trigger, 
Instant.now().truncatedTo(ChronoUnit.MILLIS), null) +} + +fun Monitor.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return this.toXContent(builder).string() +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndicesIT.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndicesIT.kt new file mode 100644 index 00000000..adf0bbcf --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/alerts/AlertIndicesIT.kt @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.alerts + +import com.amazon.opendistroforelasticsearch.alerting.ALWAYS_RUN +import com.amazon.opendistroforelasticsearch.alerting.AlertingRestTestCase +import com.amazon.opendistroforelasticsearch.alerting.randomMonitor +import com.amazon.opendistroforelasticsearch.alerting.randomTrigger +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings +import com.amazon.opendistroforelasticsearch.alerting.test.makeRequest +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.rest.RestStatus + +class AlertIndicesIT : AlertingRestTestCase() { + + fun `test create alert index`() { + executeMonitor(randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN)))) + + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) + } + + fun `test alert index gets recreated automatically if deleted`() { + val trueMonitor = randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN))) + + executeMonitor(trueMonitor) + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) + + client().makeRequest("DELETE", "*") + assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) + assertIndexDoesNotExist(AlertIndices.HISTORY_WRITE_INDEX) + + val executeResponse = executeMonitor(trueMonitor) + val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content) + val output = xcp.map() + assertNull("Error running a monitor after wiping alert indices", output["error"]) + } + + fun `test rollover history index`() { + // Update the rollover check to be every 1 second and the index max age to be 1 second + client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.key, "1s") + + val trueMonitor = randomMonitor(triggers = listOf(randomTrigger(condition = ALWAYS_RUN))) + 
executeMonitor(trueMonitor)
+
+        // Allow for a rollover index.
+        Thread.sleep(2000)
+        val response = client().makeRequest("GET", "/_cat/indices?format=json")
+        val xcp = createParser(XContentType.JSON.xContent(), response.entity.content)
+        val responseList = xcp.list()
+        val indices = mutableListOf<String>()
+        responseList.filterIsInstance<Map<String, Any>>().forEach { indices.add(it["index"] as String) }
+        assertTrue("Did not find 3 alert indices", indices.size >= 3)
+    }
+
+    private fun assertIndexExists(index: String) {
+        val response = client().makeRequest("HEAD", "$index")
+        assertEquals("Index $index does not exist.", RestStatus.OK, response.restStatus())
+    }
+
+    private fun assertIndexDoesNotExist(index: String) {
+        val response = client().makeRequest("HEAD", "$index")
+        assertEquals("Index $index still exists.", RestStatus.NOT_FOUND, response.restStatus())
+    }
+}
diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/AlertTests.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/AlertTests.kt
new file mode 100644
index 00000000..91c0692a
--- /dev/null
+++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/AlertTests.kt
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.randomAlert +import org.elasticsearch.test.ESTestCase + +class AlertTests : ESTestCase() { + fun `test alert as template args`() { + val alert = randomAlert().copy(acknowledgedTime = null, lastNotificationTime = null) + + val templateArgs = alert.asTemplateArg() + + assertEquals("Template args state does not match", templateArgs["state"], alert.state.toString()) + assertEquals("Template args error message does not match", templateArgs["error_message"], alert.errorMessage) + assertEquals( + "Template args acknowledged time does not match", + templateArgs["acknowledged_time"], + alert.acknowledgedTime?.toEpochMilli()) + assertEquals("Template args last notification time does not match", + templateArgs["last_notification_time"], + alert.lastNotificationTime?.toEpochMilli()) + } + + fun `test alert acknowledged`() { + val ackAlert = randomAlert().copy(state = Alert.State.ACKNOWLEDGED) + assertTrue("Alert is not acknowledged", ackAlert.isAcknowledged()) + + val activeAlert = randomAlert().copy(state = Alert.State.ACTIVE) + assertFalse("Alert is acknowledged", activeAlert.isAcknowledged()) + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/DestinationTests.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/DestinationTests.kt new file mode 100644 index 00000000..446dd94d --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/DestinationTests.kt @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Chime +import com.amazon.opendistroforelasticsearch.alerting.model.destination.CustomWebhook +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Slack +import org.elasticsearch.test.ESTestCase + +class DestinationTests : ESTestCase() { + + fun `test chime destination`() { + val chime = Chime("http://abc.com") + assertEquals("Url is manipulated", chime.url, "http://abc.com") + } + + fun `test chime destination with out url`() { + try { + Chime("") + fail("Creating a chime destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test slack destination`() { + val slack = Slack("http://abc.com") + assertEquals("Url is manipulated", slack.url, "http://abc.com") + } + + fun `test slack destination with out url`() { + try { + Slack("") + fail("Creating a slack destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test custom webhook destination with url and no host`() { + val customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, emptyMap(), emptyMap(), null, null) + assertEquals("Url is manipulated", customWebhook.url, "http://abc.com") + } + + fun `test custom webhook destination with host and no url`() { + try { + val customWebhook = CustomWebhook(null, null, "abc.com", 80, null, emptyMap(), emptyMap(), null, null) + assertEquals("host is manipulated", customWebhook.host, "abc.com") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test custom webhook destination with url and host`() { + // In this case, url will be given priority + val 
customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, emptyMap(), emptyMap(), null, null) + assertEquals("Url is manipulated", customWebhook.url, "http://abc.com") + } + + fun `test custom webhook destination with no url and no host`() { + try { + CustomWebhook("", null, null, 80, null, emptyMap(), emptyMap(), null, null) + fail("Creating a custom webhook destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/XContentTests.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/XContentTests.kt new file mode 100644 index 00000000..a6c26385 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/model/XContentTests.kt @@ -0,0 +1,103 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.model + +import com.amazon.opendistroforelasticsearch.alerting.model.action.Action +import com.amazon.opendistroforelasticsearch.alerting.randomAlert +import com.amazon.opendistroforelasticsearch.alerting.randomMonitor +import com.amazon.opendistroforelasticsearch.alerting.randomTemplateScript +import com.amazon.opendistroforelasticsearch.alerting.randomTrigger +import com.amazon.opendistroforelasticsearch.alerting.toJsonString +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.script.Script +import org.elasticsearch.search.SearchModule +import org.elasticsearch.test.ESTestCase +import org.elasticsearch.test.rest.ESRestTestCase + +class XContentTests : ESTestCase() { + + fun `test action parsing`() { + val action = randomAction() + val actionString = action.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() + val parsedAction = Action.parse(parser(actionString)) + assertEquals("Round tripping Monitor doesn't work", action, parsedAction) + } + + private fun randomAction( + name: String = ESRestTestCase.randomUnicodeOfLength(10), + template: Script = randomTemplateScript("Hello World"), + destinationId: String = "123" + ) = Action(name, destinationId, template, template) + + fun `test monitor parsing`() { + val monitor = randomMonitor() + + val monitorString = monitor.toJsonString() + val parsedMonitor = Monitor.parse(parser(monitorString)) + assertEquals("Round 
tripping Monitor doesn't work", monitor, parsedMonitor) + } + + fun `test trigger parsing`() { + val trigger = randomTrigger() + + val triggerString = trigger.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() + val parsedTrigger = Trigger.parse(parser(triggerString)) + + assertEquals("Round tripping Trigger doesn't work", trigger, parsedTrigger) + } + + fun `test alert parsing`() { + val alert = randomAlert() + + val alertString = alert.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() + val parsedAlert = Alert.parse(parser(alertString)) + + assertEquals("Round tripping alert doesn't work", alert, parsedAlert) + } + + fun `test creating a monitor with duplicate trigger ids fails`() { + try { + val repeatedTrigger = randomTrigger() + randomMonitor().copy(triggers = listOf(repeatedTrigger, repeatedTrigger)) + fail("Creating a monitor with duplicate triggers did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + private fun builder(): XContentBuilder { + return XContentBuilder.builder(XContentType.JSON.xContent()) + } + + private fun parser(xc: String): XContentParser { + val parser = ElasticAPI.INSTANCE.jsonParser(xContentRegistry(), xc) + parser.nextToken() + return parser + } + + override fun xContentRegistry(): NamedXContentRegistry { + return NamedXContentRegistry(listOf( + SearchInput.XCONTENT_REGISTRY) + + SearchModule(Settings.EMPTY, false, emptyList()).namedXContents) + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/DestinationRestApiIT.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/DestinationRestApiIT.kt new file mode 100644 index 00000000..9532f219 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/DestinationRestApiIT.kt @@ -0,0 +1,157 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.DESTINATION_BASE_URI +import com.amazon.opendistroforelasticsearch.alerting.AlertingRestTestCase +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Chime +import com.amazon.opendistroforelasticsearch.alerting.model.destination.CustomWebhook +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Destination +import com.amazon.opendistroforelasticsearch.alerting.model.destination.Slack +import com.amazon.opendistroforelasticsearch.alerting.test.makeRequest +import com.amazon.opendistroforelasticsearch.alerting.util.DestinationType +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.test.junit.annotations.TestLogging +import org.junit.Assert +import java.time.Instant + +@TestLogging("level:DEBUG") +@Suppress("UNCHECKED_CAST") +class DestinationRestApiIT : AlertingRestTestCase() { + + fun `test creating a chime destination`() { + val chime = Chime("http://abc.com") + val destination = Destination( + type = DestinationType.CHIME, + name = "test", + lastUpdateTime = Instant.now(), + chime = chime, + slack = null, + customWebhook = null) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CHIME) + 
Assert.assertNotNull("chime object should not be null", createdDestination.chime) + } + + fun `test updating a chime destination`() { + val destination = createDestination() + val chime = Chime("http://updated.com") + var updatedDestination = updateDestination( + destination.copy(name = "updatedName", chime = chime, type = DestinationType.CHIME)) + assertEquals("Incorrect destination name after update", updatedDestination.name, "updatedName") + assertEquals("Incorrect destination ID after update", updatedDestination.id, destination.id) + assertEquals("Incorrect destination type after update", updatedDestination.type, DestinationType.CHIME) + assertEquals("Incorrect destination url after update", "http://updated.com", updatedDestination.chime?.url) + val updatedChime = Chime("http://updated2.com") + updatedDestination = updateDestination( + destination.copy(id = destination.id, name = "updatedName", chime = updatedChime, type = DestinationType.CHIME)) + assertEquals("Incorrect destination url after update", "http://updated2.com", updatedDestination.chime?.url) + } + + fun `test creating a slack destination`() { + val slack = Slack("http://abc.com") + val destination = Destination( + type = DestinationType.SLACK, + name = "test", + lastUpdateTime = Instant.now(), + chime = null, + slack = slack, + customWebhook = null) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.SLACK) + Assert.assertNotNull("slack object should not be null", createdDestination.slack) + } + + fun `test updating a slack destination`() { + val destination = createDestination() + val slack = Slack("http://updated.com") + var updatedDestination = updateDestination( + destination.copy(name = "updatedName", slack = slack, type = DestinationType.SLACK)) + assertEquals("Incorrect destination name after update", 
updatedDestination.name, "updatedName") + assertEquals("Incorrect destination ID after update", updatedDestination.id, destination.id) + assertEquals("Incorrect destination type after update", updatedDestination.type, DestinationType.SLACK) + assertEquals("Incorrect destination url after update", "http://updated.com", updatedDestination.slack?.url) + val updatedSlack = Slack("http://updated2.com") + updatedDestination = updateDestination( + destination.copy(name = "updatedName", slack = updatedSlack, type = DestinationType.SLACK)) + assertEquals("Incorrect destination url after update", "http://updated2.com", updatedDestination.slack?.url) + } + + fun `test creating a custom webhook destination with url`() { + val customWebhook = CustomWebhook("http://abc.com", null, null, 80, null, emptyMap(), emptyMap(), null, null) + val destination = Destination( + type = DestinationType.CUSTOM_WEBHOOK, + name = "test", + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = customWebhook) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CUSTOM_WEBHOOK) + Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) + } + + fun `test creating a custom webhook destination with host`() { + val customWebhook = CustomWebhook("", "http", "abc.com", 80, "a/b/c", + mapOf("foo" to "1", "bar" to "2"), mapOf("h1" to "1", "h2" to "2"), null, null) + val destination = Destination( + type = DestinationType.CUSTOM_WEBHOOK, + name = "test", + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = customWebhook) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", 
createdDestination.type, DestinationType.CUSTOM_WEBHOOK) + assertEquals("Incorrect destination host", createdDestination.customWebhook?.host, "abc.com") + assertEquals("Incorrect destination port", createdDestination.customWebhook?.port, 80) + assertEquals("Incorrect destination path", createdDestination.customWebhook?.path, "a/b/c") + assertEquals("Incorrect destination scheme", createdDestination.customWebhook?.scheme, "http") + Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) + } + + fun `test updating a custom webhook destination`() { + val destination = createDestination() + val customWebhook = CustomWebhook("http://update1.com", "http", "abc.com", 80, null, emptyMap(), emptyMap(), null, null) + var updatedDestination = updateDestination( + destination.copy(name = "updatedName", customWebhook = customWebhook, + type = DestinationType.CUSTOM_WEBHOOK)) + assertEquals("Incorrect destination name after update", updatedDestination.name, "updatedName") + assertEquals("Incorrect destination ID after update", updatedDestination.id, destination.id) + assertEquals("Incorrect destination type after update", updatedDestination.type, DestinationType.CUSTOM_WEBHOOK) + assertEquals("Incorrect destination url after update", "http://update1.com", updatedDestination.customWebhook?.url) + var updatedCustomWebhook = CustomWebhook("http://update2.com", "http", "abc.com", 80, null, emptyMap(), emptyMap(), null, null) + updatedDestination = updateDestination( + destination.copy(name = "updatedName", customWebhook = updatedCustomWebhook, + type = DestinationType.CUSTOM_WEBHOOK)) + assertEquals("Incorrect destination url after update", "http://update2.com", updatedDestination.customWebhook?.url) + updatedCustomWebhook = CustomWebhook("", "http", "abc.com", 80, null, emptyMap(), emptyMap(), null, null) + updatedDestination = updateDestination( + destination.copy(name = "updatedName", customWebhook = updatedCustomWebhook, + type = 
DestinationType.CUSTOM_WEBHOOK)) + assertEquals("Incorrect destination url after update", "abc.com", updatedDestination.customWebhook?.host) + } + + fun `test delete destination`() { + val destination = createDestination() + val deletedDestinationResponse = client().makeRequest("DELETE", "$DESTINATION_BASE_URI/${destination.id}") + assertEquals("Delete request not successful", RestStatus.OK, deletedDestinationResponse.restStatus()) + } +} diff --git a/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/MonitorRestApiIT.kt new file mode 100644 index 00000000..65046204 --- /dev/null +++ b/alerting/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/resthandler/MonitorRestApiIT.kt @@ -0,0 +1,531 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazon.opendistroforelasticsearch.alerting.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.AlertingRestTestCase +import com.amazon.opendistroforelasticsearch.alerting.alerts.AlertIndices +import com.amazon.opendistroforelasticsearch.alerting.model.Alert +import com.amazon.opendistroforelasticsearch.alerting.model.Monitor +import com.amazon.opendistroforelasticsearch.alerting.model.Trigger +import com.amazon.opendistroforelasticsearch.alerting.randomAlert +import com.amazon.opendistroforelasticsearch.alerting.randomMonitor +import com.amazon.opendistroforelasticsearch.alerting.settings.AlertingSettings +import com.amazon.opendistroforelasticsearch.alerting.ALERTING_BASE_URI +import com.amazon.opendistroforelasticsearch.alerting.randomTrigger +import com.amazon.opendistroforelasticsearch.alerting.core.model.CronSchedule +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings +import com.amazon.opendistroforelasticsearch.alerting.test.makeRequest +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.message.BasicHeader +import org.apache.http.nio.entity.NStringEntity +import org.elasticsearch.client.ResponseException +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.script.Script +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.test.ESTestCase +import org.elasticsearch.test.junit.annotations.TestLogging +import 
org.elasticsearch.test.rest.ESRestTestCase +import java.time.ZoneId + +@TestLogging("level:DEBUG") +@Suppress("UNCHECKED_CAST") +class MonitorRestApiIT : AlertingRestTestCase() { + + val USE_TYPED_KEYS = ToXContent.MapParams(mapOf("with_type" to "true")) + + @Throws(Exception::class) + fun `test plugin is loaded`() { + val response = entityAsMap(ESRestTestCase.client().makeRequest("GET", "_nodes/plugins")) + val nodesInfo = response["nodes"] as Map> + for (nodeInfo in nodesInfo.values) { + val plugins = nodeInfo["plugins"] as List> + for (plugin in plugins) { + if (plugin["name"] == "opendistro_alerting") { + return + } + } + } + fail("Plugin not installed") + } + + fun `test parsing monitor as a scheduled job`() { + val monitor = createRandomMonitor() + + val builder = monitor.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), USE_TYPED_KEYS) + val string = ElasticAPI.INSTANCE.builderToBytesRef(builder).utf8ToString() + val xcp = createParser(XContentType.JSON.xContent(), string) + val scheduledJob = ScheduledJob.parse(xcp, monitor.id, monitor.version) + assertEquals(monitor, scheduledJob) + } + + @Throws(Exception::class) + fun `test creating a monitor`() { + val monitor = randomMonitor() + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + + fun `test creating a monitor with PUT fails`() { + try { + val monitor = randomMonitor() + client().makeRequest("PUT", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + 
fail("Expected 405 Method Not Allowed response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) + } + } + + @Throws(Exception::class) + fun `test updating search for a monitor`() { + val monitor = createRandomMonitor() + + val updatedSearch = SearchInput(emptyList(), + SearchSourceBuilder().query(QueryBuilders.termQuery("foo", "bar"))) + val updateResponse = client().makeRequest("PUT", monitor.relativeUrl(), + emptyMap(), monitor.copy(inputs = listOf(updatedSearch)).toHttpEntity()) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor search not updated", listOf(updatedSearch), updatedMonitor.inputs) + } + + @Throws(Exception::class) + fun `test updating conditions for a monitor`() { + val monitor = createRandomMonitor() + + val updatedTriggers = listOf(Trigger("foo", "1", Script("return true"), emptyList())) + val updateResponse = client().makeRequest("PUT", monitor.relativeUrl(), + emptyMap(), monitor.copy(triggers = updatedTriggers).toHttpEntity()) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers) + } + + @Throws(Exception::class) + fun `test updating schedule for a monitor`() { + val monitor = createRandomMonitor() + + val 
updatedSchedule = CronSchedule(expression = "0 9 * * *", timezone = ZoneId.of("UTC")) + val updateResponse = client().makeRequest("PUT", monitor.relativeUrl(), + emptyMap(), monitor.copy(schedule = updatedSchedule).toHttpEntity()) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor trigger not updated", updatedSchedule, updatedMonitor.schedule) + } + + @Throws(Exception::class) + fun `test getting a monitor`() { + val monitor = createRandomMonitor() + + val storedMonitor = getMonitor(monitor.id) + + assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) + } + + @Throws(Exception::class) + fun `test getting a monitor that doesn't exist`() { + try { + getMonitor(randomAlphaOfLength(20)) + fail("expected response exception") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + @Throws(Exception::class) + fun `test checking if a monitor exists`() { + val monitor = createRandomMonitor() + + val headResponse = client().makeRequest("HEAD", monitor.relativeUrl()) + assertEquals("Unable to HEAD monitor", RestStatus.OK, headResponse.restStatus()) + assertNull("Response contains unexpected body", headResponse.entity) + } + + fun `test checking if a non-existent monitor exists`() { + val headResponse = client().makeRequest("HEAD", "$ALERTING_BASE_URI/foobarbaz") + assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) + } + + @Throws(Exception::class) + fun `test deleting a monitor`() { + val monitor = createRandomMonitor() + + val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl()) + assertEquals("Delete 
failed", RestStatus.OK, deleteResponse.restStatus()) + + val getResponse = client().makeRequest("HEAD", monitor.relativeUrl()) + assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus()) + } + + @Throws(Exception::class) + fun `test deleting a monitor that doesn't exist`() { + try { + client().makeRequest("DELETE", "$ALERTING_BASE_URI/foobarbaz") + fail("expected 404 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test getting UI metadata monitor not from Kibana`() { + val monitor = createRandomMonitor(withMetadata = true) + val getMonitor = getMonitor(monitorId = monitor.id) + assertEquals("UI Metadata returned but request did not come from Kibana.", getMonitor.uiMetadata, mapOf()) + } + + fun `test getting UI metadata monitor from Kibana`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val header = BasicHeader(HttpHeaders.USER_AGENT, "Kibana") + val getMonitor = getMonitor(monitorId = monitor.id, header = header) + assertEquals("", monitor.uiMetadata, getMonitor.uiMetadata) + } + + fun `test query a monitor that exists`() { + val monitor = createRandomMonitor(true) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest("GET", "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON)) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map + val numberDocsFound = hits["total"] + assertEquals("Monitor not found during search", 1, numberDocsFound) + } + + fun `test query a monitor that exists POST`() { + val monitor = createRandomMonitor(true) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest("POST", "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON)) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map + val numberDocsFound = hits["total"] + assertEquals("Monitor not found during search", 1, numberDocsFound) + } + + fun `test query a monitor that doesn't exist`() { + // Create a random monitor to create the ScheduledJob index. Otherwise we test will fail with 404 index not found. + createRandomMonitor(refresh = true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery(ESTestCase.randomAlphaOfLength(5), + ESTestCase.randomAlphaOfLength(5))).toString() + + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON)) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map + val numberDocsFound = hits["total"] + assertEquals("Monitor found during search when no document present.", 0, numberDocsFound) + } + + fun `test query a monitor with UI metadata from Kibana`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val header = BasicHeader(HttpHeaders.USER_AGENT, "Kibana") + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON), + header) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"] as Map + val numberDocsFound = hits["total"] + assertEquals("Monitor not found during search", 1, numberDocsFound) + + val searchHits = hits["hits"] as List + val hit = searchHits[0] as Map + val monitorHit = hit["_source"] as Map + assertNotNull("UI Metadata returned from search but request did not come from Kibana", monitorHit[Monitor.UI_METADATA_FIELD]) + } + + fun `test query a monitor with UI metadata as user`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON)) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"] as Map + val numberDocsFound = hits["total"] + assertEquals("Monitor not found during search", 1, numberDocsFound) + + val searchHits = hits["hits"] as List + val hit = searchHits[0] as Map + val monitorHit = hit["_source"] as Map + 
assertNull("UI Metadata returned from search but request did not come from Kibana", monitorHit[Monitor.UI_METADATA_FIELD]) + } + + fun `test acknowledge all alert states`() { + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + val invalidAlert = randomAlert(monitor).copy(id = "foobar") + + val response = acknowledgeAlerts(monitor, acknowledgedAlert, completedAlert, errorAlert, activeAlert, invalidAlert) + val responseMap = response.asMap() + + val activeAlertAcknowledged = searchAlerts(monitor).single { it.id == activeAlert.id } + assertNotNull("Unsuccessful acknowledgement", responseMap["success"] as List) + assertTrue("Alert not in acknowledged response", responseMap["success"].toString().contains(activeAlert.id)) + assertEquals("Alert not acknowledged.", Alert.State.ACKNOWLEDGED, activeAlertAcknowledged.state) + assertNotNull("Alert acknowledged time is NULL", activeAlertAcknowledged.acknowledgedTime) + + val failedResponseList = responseMap["failed"].toString() + assertTrue("Alert in state ${acknowledgedAlert.state} not found in failed list", failedResponseList.contains(acknowledgedAlert.id)) + assertTrue("Alert in state ${completedAlert.state} not found in failed list", failedResponseList.contains(errorAlert.id)) + assertTrue("Alert in state ${errorAlert.state} not found in failed list", failedResponseList.contains(completedAlert.id)) + assertTrue("Invalid alert not found in failed list", failedResponseList.contains(invalidAlert.id)) + assertFalse("Alert in state ${activeAlert.state} found in failed list", 
failedResponseList.contains(activeAlert.id)) + } + + fun `test mappings after monitor creation`() { + createRandomMonitor(refresh = true) + + val response = client().makeRequest("GET", "/${ScheduledJob.SCHEDULED_JOBS_INDEX}/_mapping/_doc") + val parserMap = createParser(XContentType.JSON.xContent(), response.entity.content).map() as Map> + val mappingsMap = parserMap[ScheduledJob.SCHEDULED_JOBS_INDEX]!!["mappings"] as Map + val expected = createParser( + XContentType.JSON.xContent(), + javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText()) + val expectedMap = expected.map() + + assertEquals("Mappings are different", expectedMap, mappingsMap) + } + + fun `test delete monitor moves alerts`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val monitor = createRandomMonitor(true) + val alert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + refreshIndex("*") + val deleteResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${monitor.id}") + assertEquals("Delete request not successful", RestStatus.OK, deleteResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + val historyAlerts = searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX) + assertEquals("Alert was not moved to history", 1, historyAlerts.size) + assertEquals("Alert data incorrect", alert.copy(state = Alert.State.DELETED), historyAlerts.single()) + } + + fun `test delete trigger moves alerts`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val trigger = randomTrigger() + val monitor = createMonitor(randomMonitor(triggers = listOf(trigger))) + val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = 
monitor.copy(triggers = emptyList()) + val updateResponse = client().makeRequest("PUT", "$ALERTING_BASE_URI/${monitor.id}", emptyMap(), + updatedMonitor.toHttpEntity()) + assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + val historyAlerts = searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX) + assertEquals("Alert was not moved to history", 1, historyAlerts.size) + assertEquals("Alert data incorrect", alert.copy(state = Alert.State.DELETED), historyAlerts.single()) + } + + fun `test delete trigger moves alerts only for deleted trigger`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val triggerToDelete = randomTrigger() + val triggerToKeep = randomTrigger() + val monitor = createMonitor(randomMonitor(triggers = listOf(triggerToDelete, triggerToKeep))) + val alertKeep = createAlert(randomAlert(monitor).copy(triggerId = triggerToKeep.id, state = Alert.State.ACTIVE)) + val alertDelete = createAlert(randomAlert(monitor).copy(triggerId = triggerToDelete.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = monitor.copy(triggers = listOf(triggerToKeep)) + val updateResponse = client().makeRequest("PUT", "$ALERTING_BASE_URI/${monitor.id}", emptyMap(), + updatedMonitor.toHttpEntity()) + assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + // We have two alerts from above, 1 for each trigger, there should be only 1 left in active index + assertEquals("One alert should be in active index", 1, alerts.size) + assertEquals("Wrong alert in active index", alertKeep, alerts.single()) + + val historyAlerts = 
searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX) + // Only alertDelete should of been moved to history index + assertEquals("One alert should be in history index", 1, historyAlerts.size) + assertEquals("Alert data incorrect", alertDelete.copy(state = Alert.State.DELETED), historyAlerts.single()) + } + + fun `test update monitor with wrong version`() { + val monitor = createRandomMonitor(refresh = true) + try { + client().makeRequest("PUT", "${monitor.relativeUrl()}?refresh=true&version=1234", + emptyMap(), monitor.toHttpEntity()) + fail("expected 409 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.CONFLICT, e.response.restStatus()) + } + } + + fun `test monitor stats disable plugin`() { + // Disable the Monitor plugin. + disableScheduledJob() + + val responseMap = getAlertingStats() + assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster") + assertEquals("Scheduled job is not enabled", false, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key]) + assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) + val _nodes = responseMap["_nodes"] as Map + assertEquals("Incorrect number of nodes", 1, _nodes["total"]) + assertEquals("Failed nodes found during monitor stats call", 0, _nodes["failed"]) + assertEquals("More than one successful node", 1, _nodes["successful"]) + } + + fun `test monitor stats no jobs`() { + // Disable the Monitor plugin. 
+ enableScheduledJob() + + val responseMap = getAlertingStats() + assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster") + assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key]) + assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) + val _nodes = responseMap["_nodes"] as Map + assertEquals("Incorrect number of nodes", 1, _nodes["total"]) + assertEquals("Failed nodes found during monitor stats call", 0, _nodes["failed"]) + assertEquals("More than one successful node", 1, _nodes["successful"]) + } + + fun `test monitor stats jobs`() { + // Enable the Monitor plugin. + enableScheduledJob() + createRandomMonitor(refresh = true) + + val responseMap = getAlertingStats() + assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster") + assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key]) + assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + assertEquals("Node is not on schedule", 1, responseMap["nodes_on_schedule"]) + + val _nodes = responseMap["_nodes"] as Map + assertEquals("Incorrect number of nodes", 1, _nodes["total"]) + assertEquals("Failed nodes found during monitor stats call", 0, _nodes["failed"]) + assertEquals("More than one successful node", 1, _nodes["successful"]) + } + + @Throws(Exception::class) + fun `test max number of monitors`() { + client().updateSettings(AlertingSettings.ALERTING_MAX_MONITORS.key, "1") + + createRandomMonitor(refresh = true) + try { + createRandomMonitor(refresh = true) + fail("Request should be rejected as there are too many monitors.") + } catch (e: ResponseException) { + assertEquals("Unexpected status", 
RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test monitor specific metric`() { + // Enable the Monitor plugin. + enableScheduledJob() + createRandomMonitor(refresh = true) + + val responseMap = getAlertingStats("/jobs_info") + assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster") + assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key]) + assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + assertEquals("Node is not on schedule", 1, responseMap["nodes_on_schedule"]) + + val _nodes = responseMap["_nodes"] as Map + assertEquals("Incorrect number of nodes", 1, _nodes["total"]) + assertEquals("Failed nodes found during monitor stats call", 0, _nodes["failed"]) + assertEquals("More than one successful node", 1, _nodes["successful"]) + } + + fun `test monitor stats incorrect metric`() { + try { + getAlertingStats("/foobarzzz") + fail("Incorrect stats metric should have failed") + } catch (e: ResponseException) { + assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test monitor stats _all and other metric`() { + try { + getAlertingStats("/_all,jobs_info") + fail("Incorrect stats metric should have failed") + } catch (e: ResponseException) { + assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } +} diff --git a/build-tools/esplugin-coverage.gradle b/build-tools/esplugin-coverage.gradle new file mode 100644 index 00000000..6e9c9768 --- /dev/null +++ b/build-tools/esplugin-coverage.gradle @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import javax.management.remote.JMXConnectorFactory +import javax.management.remote.JMXServiceURL + +/** + * ES Plugin build tools don't work with the Gradle Jacoco Plugin to report coverage out of the box. + * https://github.com/elastic/elasticsearch/issues/28867. + * + * This code sets up coverage reporting manually for ES plugin tests. This is complicated because: + * 1. The ES integTest Task doesn't implement Gradle's JavaForkOptions so we have to manually start the jacoco agent with the test JVM + * 2. The cluster nodes are stopped using 'kill -9' which means jacoco can't dump its execution output to a file on VM shutdown + * 3. The Java Security Manager prevents JMX from writing execution output to the file. + * + * To work around these we start the cluster with jmx enabled and then use Jacoco's JMX MBean to get the execution data before the + * cluster is stopped and dump it to a file. Luckily our current security policy seems to allow this. This will also probably + * break if there are multiple nodes in the integTestCluster. But for now... it sorta works. + */ +apply plugin: 'jacoco' + +// Get gradle to generate the required jvm agent arg for us using a dummy task of type Test. Unfortunately Elastic's +// testing tasks don't derive from Test so the jacoco plugin can't do this automatically. 
+task dummyTest(type: Test) { + enabled = false + workingDir = file("/") // Force absolute path to jacoco agent jar + jacoco { + destinationFile = file("${buildDir}/jacoco/test.exec") + destinationFile.parentFile.mkdirs() + jmx = true + } +} + +task dummyIntegTest(type: Test) { + enabled = false + workingDir = file("/") // Force absolute path to jacoco agent jar + jacoco { + destinationFile = file("${buildDir}/jacoco/integTest.exec") + destinationFile.parentFile.mkdirs() + jmx = true + } +} + +test { + jvmArg dummyTest.jacoco.getAsJvmArg() +} + +integTestCluster { + jvmArgs += " ${dummyIntegTest.jacoco.getAsJvmArg()}" + systemProperty 'com.sun.management.jmxremote', "true" + systemProperty 'com.sun.management.jmxremote.authenticate', "false" + systemProperty 'com.sun.management.jmxremote.port', "7777" + systemProperty 'com.sun.management.jmxremote.ssl', "false" + systemProperty 'java.rmi.server.hostname', "127.0.0.1" +} + +task jacocoTestReport(type: JacocoReport, group: 'verification') { + dependsOn integTest, test + executionData dummyTest.jacoco.destinationFile, dummyIntegTest.jacoco.destinationFile + sourceDirectories = sourceSets.main.allSource + classDirectories = sourceSets.main.output + reports { + html.enabled = true // human readable + xml.enabled = true // for coverlay + } +} + +// See https://www.eclemma.org/jacoco/trunk/doc/api/org/jacoco/agent/rt/IAgent.html +task dumpCoverage { + doFirst { + def serverUrl = "service:jmx:rmi:///jndi/rmi://127.0.0.1:7777/jmxrmi" + def connector = JMXConnectorFactory.connect(new JMXServiceURL(serverUrl)) + try { + def jacocoMBean = new GroovyMBean(connector.MBeanServerConnection, "org.jacoco:type=Runtime") + byte[] data = jacocoMBean.getExecutionData(false) + file(dummyIntegTest.jacoco.destinationFile).setBytes(data) + } finally { + connector.close() + } + } +} +project.gradle.projectsEvaluated { + dumpCoverage.dependsOn integTestRunner + tasks['integTestCluster#stop'].dependsOn dumpCoverage + jacocoTestReport.dependsOn 
dumpCoverage +} diff --git a/build-tools/merged-coverage.gradle b/build-tools/merged-coverage.gradle new file mode 100644 index 00000000..59339492 --- /dev/null +++ b/build-tools/merged-coverage.gradle @@ -0,0 +1,58 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +allprojects { + plugins.withId('jacoco') { + // 0.8.2 doesn't show missing coverage of Kotlin generated methods. Remove once this becomes gradle's default + jacoco.toolVersion = '0.8.2' + tasks.withType(Test) { + jacoco { append = false } + } + // For some reason this dependency isn't getting setup automatically by the jacoco plugin + tasks.withType(JacocoReport) { + dependsOn tasks.withType(Test) + } + } +} + +task jacocoMerge(type: JacocoMerge) { + gradle.projectsEvaluated { + subprojects.each { + jacocoMerge.dependsOn it.tasks.withType(JacocoReport) + jacocoMerge.executionData it.tasks.withType(JacocoReport).collect { it.executionData } + } + } + doFirst { + executionData = files(executionData.findAll { it.exists() }) + } +} + +task jacocoReport(type: JacocoReport, group: 'verification') { + description = 'Generates an aggregate report from all subprojects' + dependsOn jacocoMerge + executionData jacocoMerge.destinationFile + + reports { + html.enabled = true // human readable + xml.enabled = true + } + + gradle.projectsEvaluated { + sourceDirectories = files(subprojects.sourceSets.main.allSource.srcDirs) + classDirectories = 
files(subprojects.sourceSets.main.output) + } +} + +check.dependsOn jacocoReport diff --git a/build-tools/repositories.gradle b/build-tools/repositories.gradle new file mode 100644 index 00000000..bdc57c0a --- /dev/null +++ b/build-tools/repositories.gradle @@ -0,0 +1,19 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +repositories { + mavenCentral() + jcenter() +} \ No newline at end of file diff --git a/build-tools/rpmbuild.gradle b/build-tools/rpmbuild.gradle new file mode 100644 index 00000000..5248eca2 --- /dev/null +++ b/build-tools/rpmbuild.gradle @@ -0,0 +1,40 @@ +apply plugin: 'nebula.ospackage' + +// This is afterEvaluate because the bundlePlugin ZIP task is updated afterEvaluate and changes the ZIP name to match the plugin name +afterEvaluate { + ospackage { + packageName = "opendistro-${name}" + release = isSnapshot ? 
"0.1" : '1' + version = "${project.version}" - "-SNAPSHOT" + + into '/usr/share/elasticsearch/plugins' + from(zipTree(bundlePlugin.archivePath)) { + into esplugin.name + } + + user 'root' + permissionGroup 'root' + fileMode 0644 + dirMode 0755 + + requires('elasticsearch-oss', versions.elasticsearch, EQUAL) + arch = 'NOARCH' + packager = 'Amazon' + vendor = 'Amazon' + os = 'LINUX' + prefix '/usr' + + license 'ASL-2.0' + maintainer 'OpenDistro for Elasticsearch Team ' + url 'https://opendistro.github.io/elasticsearch/downloads' + summary ''' + Alerting plugin for OpenDistro for Elasticsearch. + Reference documentation can be found at https://opendistro.github.io/elasticsearch/docs. + '''.stripIndent().replace('\n', ' ').trim() + } + + buildRpm { + archiveName "${packageName}-${version}.rpm" + dependsOn 'assemble' + } +} \ No newline at end of file diff --git a/build.gradle b/build.gradle new file mode 100644 index 00000000..0ca67cd1 --- /dev/null +++ b/build.gradle @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +buildscript { + apply from: 'build-tools/repositories.gradle' + + ext { + es_version = '6.5.4' + kotlin_version = '1.2.60' + } + + repositories { + mavenCentral() + maven { url "https://plugins.gradle.org/m2/" } + jcenter() + } + dependencies { + classpath "org.elasticsearch.gradle:build-tools:${es_version}" + classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:${kotlin_version}" + } +} + +plugins { + id 'nebula.ospackage' version "5.3.0" apply false +} + +apply plugin: 'base' +apply plugin: 'jacoco' +apply from: 'build-tools/merged-coverage.gradle' + +ext { + opendistroVersion = '0.7.0' + isSnapshot = "true" == System.getProperty("build.snapshot", "true") +} + +allprojects { + group = "com.amazon.opendistroforelasticsearch" + // Increment the final digit when there's a new plugin versions for the same opendistro version + // Reset the final digit to 0 when upgrading to a new opendistro version + version = "${opendistroVersion}.0" + + apply from: "$rootDir/build-tools/repositories.gradle" + + plugins.withId('java') { + sourceCompatibility = targetCompatibility = "1.8" + } + plugins.withId('org.jetbrains.kotlin.jvm') { + compileKotlin.kotlinOptions.jvmTarget = compileTestKotlin.kotlinOptions.jvmTarget = "1.8" + } +} + +evaluationDependsOnChildren() + +check.dependsOn subprojects*.check + +configurations { + ktlint +} + +dependencies { + ktlint "com.github.shyiko:ktlint:0.29.0" +} + +task ktlint(type: JavaExec, group: "verification") { + description = "Check Kotlin code style." + main = "com.github.shyiko.ktlint.Main" + classpath = configurations.ktlint + args "alerting/**/*.kt", "elastic-api/**/*.kt", "core/**/*.kt" + +} + +task ktlintFormat(type: JavaExec, group: "formatting") { + description = "Fix Kotlin code style deviations." 
+ main = "com.github.shyiko.ktlint.Main" + classpath = configurations.ktlint + args "-F", "alerting/**/*.kt", "elastic-api/**/*.kt", "core/**/*.kt" +} + +check.dependsOn ktlint diff --git a/core/build.gradle b/core/build.gradle new file mode 100644 index 00000000..aebab4eb --- /dev/null +++ b/core/build.gradle @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +apply plugin: 'java' +apply plugin: 'org.jetbrains.kotlin.jvm' +apply plugin: 'jacoco' + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${es_version}" + compile "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" + compile "com.cronutils:cron-utils:7.0.5" + compile project(':alerting-elastic-api') + + testImplementation "org.elasticsearch.test:framework:${es_version}" + testImplementation "org.jetbrains.kotlin:kotlin-test:${kotlin_version}" + testImplementation "org.jetbrains.kotlin:kotlin-test-junit:${kotlin_version}" + testCompile project(path: ':alerting-elastic-api', configuration: 'testOutput') +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobRunner.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobRunner.kt new file mode 100644 index 00000000..bb0ea7df --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobRunner.kt @@ -0,0 +1,27 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import java.time.Instant + +interface JobRunner { + fun postDelete(jobId: String) + + fun postIndex(job: ScheduledJob) + + fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeper.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeper.kt new file mode 100644 index 00000000..e04a48b4 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeper.kt @@ -0,0 +1,465 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.core + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import com.amazon.opendistroforelasticsearch.alerting.core.schedule.JobScheduler +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.REQUEST_TIMEOUT +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEPER_ENABLED +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_MILLIS +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_RETRY_COUNT +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PAGE_SIZE +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PERIOD +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.firstFailureOrNull +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.retry +import org.elasticsearch.action.bulk.BackoffPolicy +import org.elasticsearch.action.search.SearchRequest +import org.elasticsearch.client.Client +import org.elasticsearch.cluster.ClusterChangedEvent +import org.elasticsearch.cluster.ClusterStateListener +import org.elasticsearch.cluster.routing.IndexShardRoutingTable +import org.elasticsearch.cluster.routing.Murmur3HashFunction +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.Strings +import org.elasticsearch.common.bytes.BytesReference +import org.elasticsearch.common.component.LifecycleListener +import org.elasticsearch.common.lucene.uid.Versions +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.unit.TimeValue +import org.elasticsearch.common.util.concurrent.EsExecutors +import 
org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils +import org.elasticsearch.index.engine.Engine +import org.elasticsearch.index.query.BoolQueryBuilder +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.index.shard.IndexingOperationListener +import org.elasticsearch.index.shard.ShardId +import org.elasticsearch.rest.RestStatus +import org.elasticsearch.search.builder.SearchSourceBuilder +import org.elasticsearch.search.sort.FieldSortBuilder +import org.elasticsearch.threadpool.Scheduler +import org.elasticsearch.threadpool.ThreadPool +import java.util.TreeMap +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.Executors + +typealias JobId = String +typealias JobVersion = Long + +/** + * 'Sweeping' is the process of listening for new and updated [ScheduledJob]s and deciding if they should be scheduled for + * execution on this node. The [JobSweeper] runs on every node, sweeping all local active shards that are present on the node. + * + * A [consistent hash][ShardNodes] is used to distribute jobs across all nodes that contain an active instance of the same shard. + * This minimizes any interruptions in job execution when the cluster configuration changes. + * + * There are two types of sweeps: + * - *Full sweeps* occur when the [routing table][IndexShardRoutingTable] for the shard changes (for e.g. a replica has been + * added or removed). The full sweep re-reads all jobs in the shard, deciding which ones to run locally. All full sweeps + * happen asynchronously in the background in a serial manner. See the [sweepAllShards] method. + * - *Single job sweeps* occur when a new version of the job is indexed or deleted. An [IndexingOperationListener] listens + * for index changes and synchronously schedules or removes the job from the scheduler. 
+ */ +class JobSweeper( + private val settings: Settings, + private val client: Client, + private val clusterService: ClusterService, + private val threadPool: ThreadPool, + private val xContentRegistry: NamedXContentRegistry, + private val scheduler: JobScheduler, + private val sweepableJobTypes: List +) : ClusterStateListener, IndexingOperationListener, LifecycleListener() { + private val logger = ElasticAPI.INSTANCE.getLogger(javaClass, settings) + + private val fullSweepExecutor = Executors.newSingleThreadExecutor(EsExecutors.daemonThreadFactory("opendistro_job_sweeper")) + + private val sweptJobs = ConcurrentHashMap>() + + private var scheduledFullSweep: Scheduler.Cancellable? = null + + @Volatile private var lastFullSweepTimeNano = System.nanoTime() + + @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings) + @Volatile private var sweepPeriod = SWEEP_PERIOD.get(settings) + @Volatile private var sweeperEnabled = SWEEPER_ENABLED.get(settings) + @Volatile private var sweepPageSize = SWEEP_PAGE_SIZE.get(settings) + @Volatile private var sweepBackoffMillis = SWEEP_BACKOFF_MILLIS.get(settings) + @Volatile private var sweepBackoffRetryCount = SWEEP_BACKOFF_RETRY_COUNT.get(settings) + @Volatile private var sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) + + init { + clusterService.addListener(this) + clusterService.addLifecycleListener(this) + clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PERIOD) { + // if sweep period change, restart background sweep with new sweep period + logger.debug("Reinitializing background full sweep with period: ${sweepPeriod.minutes()}") + sweepPeriod = it + initBackgroundSweep() + } + clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEPER_ENABLED) { + sweeperEnabled = it + if (!sweeperEnabled) disable() else enable() + } + clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_MILLIS) { + sweepBackoffMillis = it + sweepSearchBackoff = 
BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) + } + clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_RETRY_COUNT) { + sweepBackoffRetryCount = it + sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) + } + clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PAGE_SIZE) { sweepPageSize = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } + } + + override fun afterStart() { + initBackgroundSweep() + } + + override fun beforeStop() { + scheduledFullSweep?.cancel() + } + + override fun beforeClose() { + fullSweepExecutor.shutdown() + } + + /** + * Initiates a full sweep of all local shards when the index routing table is changed (for e.g. when the node joins + * the cluster, a replica is added, removed or promoted to primary). + * + * This callback won't be invoked concurrently since cluster state changes are applied serially to the node + * in the order they occur on the master. However we can't block this callback for the duration of a full sweep so + * we perform the sweep in the background in a single threaded executor [fullSweepExecutor]. + */ + override fun clusterChanged(event: ClusterChangedEvent) { + if (!isSweepingEnabled()) return + + if (!event.indexRoutingTableChanged(ScheduledJob.SCHEDULED_JOBS_INDEX)) return + + logger.debug("Scheduled Jobs routing table changed. Running full sweep...") + fullSweepExecutor.submit { + sweepAllShards() + } + } + + /** + * This callback is invoked when a new job (or new version of a job) is indexed. If the job is assigned to the node + * it is scheduled. Relies on all indexing operations using optimistic concurrency control to ensure that stale versions + * of jobs are not scheduled. 
It schedules job only if it is one of the [sweepableJobTypes] + * + */ + override fun postIndex(shardId: ShardId, index: Engine.Index, result: Engine.IndexResult) { + if (!isSweepingEnabled()) return + + if (ElasticAPI.INSTANCE.hasWriteFailed(result)) { + val shardJobs = sweptJobs[shardId] ?: emptyMap() + val currentVersion = shardJobs[index.id()] ?: Versions.NOT_FOUND + logger.debug("Indexing failed for ScheduledJob: ${index.id()}. Continuing with current version $currentVersion") + return + } + + if (isOwningNode(shardId, index.id())) { + val xcp = ElasticAPI.INSTANCE.jsonParser(xContentRegistry, index.source()) + if (isSweepableJobType(xcp)) { + val job = parseAndSweepJob(xcp, shardId, index.id(), result.version, index.source(), true) + if (job != null) scheduler.postIndex(job) + } else { + logger.debug("Not a valid job type in document ${index.id()} to sweep.") + } + } + } + + /** + * This callback is invoked when a job is deleted from a shard. The job is descheduled. Relies on all delete operations + * using optimistic concurrency control to ensure that stale versions of jobs are not scheduled. + */ + override fun postDelete(shardId: ShardId, delete: Engine.Delete, result: Engine.DeleteResult) { + if (!isSweepingEnabled()) return + + if (ElasticAPI.INSTANCE.hasWriteFailed(result)) { + val shardJobs = sweptJobs[shardId] ?: emptyMap() + val currentVersion = shardJobs[delete.id()] ?: Versions.NOT_FOUND + logger.debug("Deletion failed for ScheduledJob: ${delete.id()}. 
Continuing with current version $currentVersion") + return + } + + if (isOwningNode(shardId, delete.id())) { + if (scheduler.scheduledJobs().contains(delete.id())) { + sweep(shardId, delete.id(), result.version, null) + } + scheduler.postDelete(delete.id()) + } + } + + fun enable() { + // initialize background sweep + initBackgroundSweep() + // set sweeperEnabled flag to true to make the listeners aware of this setting + sweeperEnabled = true + } + + fun disable() { + // cancel background sweep + scheduledFullSweep?.cancel() + // deschedule existing jobs on this node + logger.info("Descheduling all jobs as sweeping is disabled") + scheduler.deschedule(scheduler.scheduledJobs()) + // set sweeperEnabled flag to false to make the listeners aware of this setting + sweeperEnabled = false + } + + public fun isSweepingEnabled(): Boolean { + // Although it is a single link check, keeping it as a separate function, so we + // can abstract out logic of finding out whether to proceed or not + return sweeperEnabled == true + } + + private fun initBackgroundSweep() { + + // if sweeping disabled, background sweep should not be triggered + if (!isSweepingEnabled()) return + + // cancel existing background thread if present + scheduledFullSweep?.cancel() + + // Setup an anti-entropy/self-healing background sweep, in case a sweep that was triggered by an event fails. + val scheduledSweep = Runnable { + val elapsedTime = getFullSweepElapsedTime() + + // Rate limit to at most one full sweep per sweep period + // The schedule runs may wake up a few milliseconds early. + // Delta will be giving some buffer on the schedule to allow waking up slightly earlier. + val delta = sweepPeriod.millis - elapsedTime.millis + if (delta < 20L) { // give 20ms buffer. 
+ fullSweepExecutor.submit { + logger.debug("Performing background sweep of scheduled jobs.") + sweepAllShards() + } + } + } + scheduledFullSweep = threadPool.scheduleWithFixedDelay(scheduledSweep, sweepPeriod, ThreadPool.Names.SAME) + } + + private fun sweepAllShards() { + val clusterState = clusterService.state() + if (!clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX)) { + scheduler.deschedule(scheduler.scheduledJobs()) + sweptJobs.clear() + return + } + + // Find all shards that are currently assigned to this node. + val localNodeId = clusterState.nodes.localNodeId + val localShards = clusterState.routingTable.allShards(ScheduledJob.SCHEDULED_JOBS_INDEX) + // Find all active shards + .filter { it.active() } + // group by shardId + .groupBy { it.shardId() } + // assigned to local node + .filter { (_, shards) -> shards.any { it.currentNodeId() == localNodeId } } + + // Remove all jobs on shards that are no longer assigned to this node. + val removedShards = sweptJobs.keys - localShards.keys + removedShards.forEach { shardId -> + val shardJobs = sweptJobs.remove(shardId) ?: emptyMap() + scheduler.deschedule(shardJobs.keys) + } + + // resweep all shards that are assigned to this node. 
+ localShards.forEach { (shardId, shards) -> + try { + sweepShard(shardId, ShardNodes(localNodeId, shards.map { it.currentNodeId() })) + } catch (e: Exception) { + val shardLogger = ElasticAPI.INSTANCE.getLogger(javaClass, settings, shardId) + shardLogger.error("Error while sweeping shard $shardId", e) + } + } + lastFullSweepTimeNano = System.nanoTime() + } + + private fun sweepShard(shardId: ShardId, shardNodes: ShardNodes, startAfter: String = "") { + val logger = ElasticAPI.INSTANCE.getLogger(javaClass, settings, shardId) + logger.debug("Sweeping shard $shardId") + + // Remove any jobs that are currently scheduled that are no longer owned by this node + val currentJobs = sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } + currentJobs.keys.filterNot { shardNodes.isOwningNode(it) }.forEach { + scheduler.deschedule(it) + currentJobs.remove(it) + } + + // sweep the shard for new and updated jobs. Uses a search after query to paginate, assuming that any concurrent + // updates and deletes are handled by the index operation listener. + var searchAfter: String? 
= startAfter + while (searchAfter != null) { + val boolQueryBuilder = BoolQueryBuilder() + sweepableJobTypes.forEach { boolQueryBuilder.should(QueryBuilders.existsQuery(it)) } + val jobSearchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .preference("_shards:${shardId.id}|_only_local") + .source(SearchSourceBuilder.searchSource() + .version(true) + .sort(FieldSortBuilder("_id") + .unmappedType("keyword") + .missing("_last")) + .searchAfter(arrayOf(searchAfter)) + .size(sweepPageSize) + .query(boolQueryBuilder)) + + val response = sweepSearchBackoff.retry { + client.search(jobSearchRequest).actionGet(requestTimeout) + } + if (response.status() != RestStatus.OK) { + logger.error("Error sweeping shard $shardId.", response.firstFailureOrNull()) + return + } + for (hit in response.hits) { + if (shardNodes.isOwningNode(hit.id)) { + val xcp = ElasticAPI.INSTANCE.jsonParser(xContentRegistry, hit.sourceRef) + parseAndSweepJob(xcp, shardId, hit.id, hit.version, hit.sourceRef) + } + } + searchAfter = response.hits.lastOrNull()?.id + } + } + + private fun sweep( + shardId: ShardId, + jobId: JobId, + newVersion: JobVersion, + job: ScheduledJob?, + failedToParse: Boolean = false + ) { + sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } + // Use [compute] to update atomically in case another thread concurrently indexes/deletes the same job + .compute(jobId) { _, currentVersion -> + if (newVersion <= (currentVersion ?: Versions.NOT_FOUND)) { + logger.debug("Skipping job $jobId, $newVersion <= $currentVersion") + return@compute currentVersion + } + + // deschedule the currently scheduled version + if (scheduler.scheduledJobs().contains(jobId)) { + scheduler.deschedule(jobId) + } + + if (failedToParse) { + return@compute currentVersion + } + if (job != null) { + if (job.enabled) { + scheduler.schedule(job) + } + return@compute newVersion + } else { + return@compute null + } + } + } + + private fun parseAndSweepJob( + xcp: XContentParser, + shardId: 
ShardId, + jobId: JobId, + jobVersion: JobVersion, + jobSource: BytesReference, + typeIsParsed: Boolean = false + ): ScheduledJob? { + return try { + val job = parseScheduledJob(xcp, jobId, jobVersion, typeIsParsed) + sweep(shardId, jobId, jobVersion, job) + job + } catch (e: Exception) { + logger.warn("Unable to parse ScheduledJob source: {}", + Strings.cleanTruncate(jobSource.utf8ToString(), 1000)) + sweep(shardId, jobId, jobVersion, null, true) + null + } + } + + private fun parseScheduledJob(xcp: XContentParser, jobId: JobId, jobVersion: JobVersion, typeIsParsed: Boolean): ScheduledJob { + return if (typeIsParsed) { + ScheduledJob.parse(xcp, xcp.currentName(), jobId, jobVersion) + } else { + ScheduledJob.parse(xcp, jobId, jobVersion) + } + } + + private fun getFullSweepElapsedTime(): TimeValue { + return TimeValue.timeValueNanos(System.nanoTime() - lastFullSweepTimeNano) + } + + fun getJobSweeperMetrics(): JobSweeperMetrics { + if (!isSweepingEnabled()) { + return JobSweeperMetrics(-1, true) + } + val elapsedTime = getFullSweepElapsedTime() + return JobSweeperMetrics(elapsedTime.millis, elapsedTime.millis <= sweepPeriod.millis) + } + + private fun isSweepableJobType(xcp: XContentParser): Boolean { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp::getTokenLocation) + val jobType = xcp.currentName() + return sweepableJobTypes.contains(jobType) + } + + private fun isOwningNode(shardId: ShardId, jobId: JobId): Boolean { + val localNodeId = clusterService.localNode().id + val shardNodeIds = clusterService.state().routingTable.shardRoutingTable(shardId) + .filter { it.active() } + .map { it.currentNodeId() } + val shardNodes = ShardNodes(localNodeId, shardNodeIds) + return shardNodes.isOwningNode(jobId) + } +} + +/** + * A group of nodes in the cluster that contain active instances of a single ES shard. 
This uses a consistent hash to divide + * the jobs indexed in that shard amongst the nodes such that each job is "owned" by exactly one of the nodes. + * The local node must have an active instance of the shard. + * + * Implementation notes: This class is not thread safe. It uses the same [hash function][Murmur3HashFunction] that ES uses + * for routing. For each real node `100` virtual nodes are added to provide a good distribution. + */ +private class ShardNodes(val localNodeId: String, activeShardNodeIds: Collection) { + + private val circle = TreeMap() + + companion object { + private const val VIRTUAL_NODE_COUNT = 100 + } + + init { + for (node in activeShardNodeIds) { + for (i in 0 until VIRTUAL_NODE_COUNT) { + circle[Murmur3HashFunction.hash(node + i)] = node + } + } + } + + fun isOwningNode(id: JobId): Boolean { + if (circle.isEmpty()) { + return false + } + val hash = Murmur3HashFunction.hash(id) + val nodeId = (circle.higherEntry(hash) ?: circle.firstEntry()).value + return (localNodeId == nodeId) + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeperMetrics.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeperMetrics.kt new file mode 100644 index 00000000..8e4acb22 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/JobSweeperMetrics.kt @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core + +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import org.elasticsearch.common.io.stream.Writeable +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentFragment +import org.elasticsearch.common.xcontent.XContentBuilder + +data class JobSweeperMetrics(val lastFullSweepTimeMillis: Long, val fullSweepOnTime: Boolean) : ToXContentFragment, Writeable { + + constructor(si: StreamInput) : this(si.readLong(), si.readBoolean()) + + override fun writeTo(out: StreamOutput) { + out.writeLong(lastFullSweepTimeMillis) + out.writeBoolean(fullSweepOnTime) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.field("last_full_sweep_time_millis", lastFullSweepTimeMillis) + builder.field("full_sweep_on_time", fullSweepOnTime) + return builder + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/ScheduledJobIndices.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/ScheduledJobIndices.kt new file mode 100644 index 00000000..129e88e5 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/ScheduledJobIndices.kt @@ -0,0 +1,72 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import org.elasticsearch.action.ActionListener +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse +import org.elasticsearch.client.AdminClient +import org.elasticsearch.cluster.health.ClusterIndexHealth +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.xcontent.XContentType + +/** + * Initialize the Elasticsearch components required to run [ScheduledJobs]. + * + * [initScheduledJobIndex] is called before indexing a new scheduled job. It verifies that the index exists before + * allowing the index to go through. This is to ensure the correct mappings exist for [ScheduledJob]. + */ +class ScheduledJobIndices(private val client: AdminClient, private val clusterService: ClusterService) { + + /** + * Initialize the indices required for scheduled jobs. + * First check if the index exists, and if not create the index with the provided callback listeners. + * + * @param actionListener A callback listener for the index creation call. 
Generally in the form of onSuccess, onFailure + */ + fun initScheduledJobIndex(actionListener: ActionListener) { + if (!scheduledJobIndexExists()) { + var indexRequest = CreateIndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) + .mapping(ScheduledJob.SCHEDULED_JOB_TYPE, scheduledJobMappings(), XContentType.JSON) + client.indices().create(indexRequest, actionListener) + } + } + + private fun scheduledJobMappings(): String { + return javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText() + } + + fun scheduledJobIndexExists(): Boolean { + val clusterState = clusterService.state() + return clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX) + } + + /** + * Check if the index exists. If the index does not exist, return null. + */ + fun scheduledJobIndexHealth(): ClusterIndexHealth? { + var indexHealth: ClusterIndexHealth? = null + + if (scheduledJobIndexExists()) { + val indexRoutingTable = clusterService.state().routingTable.index(ScheduledJob.SCHEDULED_JOBS_INDEX) + val indexMetaData = clusterService.state().metaData().index(ScheduledJob.SCHEDULED_JOBS_INDEX) + + indexHealth = ClusterIndexHealth(indexMetaData, indexRoutingTable) + } + return indexHealth + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobStats.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobStats.kt new file mode 100644 index 00000000..50b5205e --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobStats.kt @@ -0,0 +1,104 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import com.amazon.opendistroforelasticsearch.alerting.core.JobSweeperMetrics +import com.amazon.opendistroforelasticsearch.alerting.core.resthandler.RestScheduledJobStatsHandler +import com.amazon.opendistroforelasticsearch.alerting.core.schedule.JobSchedulerMetrics +import org.elasticsearch.action.support.nodes.BaseNodeResponse +import org.elasticsearch.cluster.node.DiscoveryNode +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentFragment +import org.elasticsearch.common.xcontent.XContentBuilder + +/** + * Scheduled job stat that will be generated by each node. + */ +class ScheduledJobStats : BaseNodeResponse, ToXContentFragment { + + enum class ScheduleStatus(val status: String) { + RED("red"), + GREEN("green"); + + override fun toString(): String { + return status + } + } + + lateinit var status: ScheduleStatus + var jobSweeperMetrics: JobSweeperMetrics? = null + var jobInfos: Array? = null + + constructor() + + constructor( + node: DiscoveryNode, + status: ScheduleStatus, + jobSweeperMetrics: JobSweeperMetrics?, + jobsInfo: Array? 
+ ) : super(node) { + this.status = status + this.jobSweeperMetrics = jobSweeperMetrics + this.jobInfos = jobsInfo + } + + companion object { + @JvmStatic + fun readScheduledJobStatus(si: StreamInput): ScheduledJobStats { + val scheduledJobStatus = ScheduledJobStats() + scheduledJobStatus.readFrom(si) + return scheduledJobStatus + } + } + + override fun readFrom(si: StreamInput) { + super.readFrom(si) + this.status = si.readEnum(ScheduleStatus::class.java) + this.jobSweeperMetrics = si.readOptionalWriteable { JobSweeperMetrics(si) } + this.jobInfos = si.readOptionalArray({ JobSchedulerMetrics(si) }, { arrayOfNulls(it) }) + } + + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeEnum(status) + out.writeOptionalWriteable(jobSweeperMetrics) + out.writeOptionalArray(jobInfos) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.field("name", node.name) + builder.field("schedule_status", status) + builder.field("roles", node.roles.toTypedArray()) + if (jobSweeperMetrics != null) { + builder.startObject(RestScheduledJobStatsHandler.JOB_SCHEDULING_METRICS) + jobSweeperMetrics!!.toXContent(builder, params) + builder.endObject() + } + + if (jobInfos != null) { + builder.startObject(RestScheduledJobStatsHandler.JOBS_INFO) + for (job in jobInfos!!) { + builder.startObject(job.scheduledJobId) + job.toXContent(builder, params) + builder.endObject() + } + builder.endObject() + } + return builder + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsAction.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsAction.kt new file mode 100644 index 00000000..140e03e8 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsAction.kt @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import org.elasticsearch.action.Action +import org.elasticsearch.client.ElasticsearchClient + +class ScheduledJobsStatsAction : Action(NAME) { + companion object { + val INSTANCE = ScheduledJobsStatsAction() + const val NAME = "cluster:admin/opendistro/_scheduled_jobs/stats" + } + + override fun newRequestBuilder(client: ElasticsearchClient): ScheduledJobsStatsRequestBuilder { + return ScheduledJobsStatsRequestBuilder(client, this) + } + + override fun newResponse(): ScheduledJobsStatsResponse { + return ScheduledJobsStatsResponse() + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt new file mode 100644 index 00000000..67dbe082 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import org.elasticsearch.action.support.nodes.BaseNodesRequest +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import java.io.IOException + +/** + * A request to get node (cluster) level ScheduledJobsStatus. + * By default all the parameters will be true. + */ +class ScheduledJobsStatsRequest : BaseNodesRequest { + var jobSchedulingMetrics: Boolean = true + var jobsInfo: Boolean = true + + constructor() + constructor(nodeIds: Array) : super(*nodeIds) + + @Throws(IOException::class) + override fun readFrom(si: StreamInput) { + super.readFrom(si) + jobSchedulingMetrics = si.readBoolean() + jobsInfo = si.readBoolean() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeBoolean(jobSchedulingMetrics) + out.writeBoolean(jobsInfo) + } + + fun all(): ScheduledJobsStatsRequest { + jobSchedulingMetrics = true + jobsInfo = true + return this + } + + fun clear(): ScheduledJobsStatsRequest { + jobSchedulingMetrics = false + jobsInfo = false + return this + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequestBuilder.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequestBuilder.kt new file mode 100644 index 00000000..44022b98 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsRequestBuilder.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder +import org.elasticsearch.client.ElasticsearchClient + +class ScheduledJobsStatsRequestBuilder( + client: ElasticsearchClient, + action: ScheduledJobsStatsAction +) : + NodesOperationRequestBuilder< + ScheduledJobsStatsRequest, + ScheduledJobsStatsResponse, + ScheduledJobsStatsRequestBuilder> + (client, action, ScheduledJobsStatsRequest()) diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt new file mode 100644 index 00000000..eac832b6 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt @@ -0,0 +1,81 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import com.amazon.opendistroforelasticsearch.alerting.core.settings.ScheduledJobSettings +import org.elasticsearch.action.FailedNodeException +import org.elasticsearch.action.support.nodes.BaseNodesResponse +import org.elasticsearch.cluster.ClusterName +import org.elasticsearch.cluster.health.ClusterIndexHealth +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentFragment +import org.elasticsearch.common.xcontent.XContentBuilder + +/** + * ScheduledJobsStatsResponse is a class that will contain all the response from each node. + */ +class ScheduledJobsStatsResponse : BaseNodesResponse, ToXContentFragment { + + private var scheduledJobEnabled: Boolean = false + private var indexExists: Boolean? = null + private var indexHealth: ClusterIndexHealth? = null + + constructor() + constructor( + clusterName: ClusterName, + nodeResponses: List, + failures: List, + scheduledJobEnabled: Boolean, + indexExists: Boolean, + indexHealth: ClusterIndexHealth? 
+ ) : super(clusterName, nodeResponses, failures) { + this.scheduledJobEnabled = scheduledJobEnabled + this.indexExists = indexExists + this.indexHealth = indexHealth + } + + override fun writeNodesTo( + out: StreamOutput, + nodes: MutableList + ) { + out.writeStreamableList(nodes) + } + + override fun readNodesFrom(si: StreamInput): MutableList { + return si.readList { ScheduledJobStats.readScheduledJobStatus(it) } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.field(ScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled) + builder.field("scheduled_job_index_exists", indexExists) + builder.field("scheduled_job_index_status", indexHealth?.status?.name?.toLowerCase()) + val nodesOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.GREEN } + val nodesNotOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.RED } + builder.field("nodes_on_schedule", nodesOnSchedule) + builder.field("nodes_not_on_schedule", nodesNotOnSchedule) + builder.startObject("nodes") + for (scheduledJobStatus in nodes) { + builder.startObject(scheduledJobStatus.node.id) + scheduledJobStatus.toXContent(builder, params) + builder.endObject() + } + builder.endObject() + + return builder + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt new file mode 100644 index 00000000..e41356ff --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt @@ -0,0 +1,154 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.action.node + +import com.amazon.opendistroforelasticsearch.alerting.core.JobSweeper +import com.amazon.opendistroforelasticsearch.alerting.core.JobSweeperMetrics +import com.amazon.opendistroforelasticsearch.alerting.core.ScheduledJobIndices +import com.amazon.opendistroforelasticsearch.alerting.core.schedule.JobScheduler +import com.amazon.opendistroforelasticsearch.alerting.core.schedule.JobSchedulerMetrics +import org.elasticsearch.action.FailedNodeException +import org.elasticsearch.action.support.ActionFilters +import org.elasticsearch.action.support.nodes.BaseNodeRequest +import org.elasticsearch.action.support.nodes.TransportNodesAction +import org.elasticsearch.cluster.health.ClusterIndexHealth +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver +import org.elasticsearch.cluster.service.ClusterService +import org.elasticsearch.common.inject.Inject +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import org.elasticsearch.common.logging.Loggers +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.threadpool.ThreadPool +import org.elasticsearch.transport.TransportService +import java.io.IOException + +class ScheduledJobsStatsTransportAction : TransportNodesAction { + + private val log = Loggers.getLogger(ScheduledJobsStatsTransportAction::class.java) + + private val jobSweeper: JobSweeper + private val jobScheduler: JobScheduler + private val scheduledJobIndices: ScheduledJobIndices + + @Inject + constructor( + 
settings: Settings, + threadPool: ThreadPool, + clusterService: ClusterService, + transportService: TransportService, + actionFilters: ActionFilters, + indexNameExpressionResolver: IndexNameExpressionResolver, + jobSweeper: JobSweeper, + jobScheduler: JobScheduler, + scheduledJobIndices: ScheduledJobIndices + ) : super( + settings, + ScheduledJobsStatsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + { ScheduledJobsStatsRequest() }, + { ScheduledJobStatusRequest() }, + ThreadPool.Names.MANAGEMENT, + ScheduledJobStats::class.java + ) { + this.jobSweeper = jobSweeper + this.jobScheduler = jobScheduler + this.scheduledJobIndices = scheduledJobIndices + } + + override fun newNodeRequest(nodeId: String, request: ScheduledJobsStatsRequest): ScheduledJobStatusRequest { + return ScheduledJobStatusRequest(nodeId, request) + } + + override fun newNodeResponse(): ScheduledJobStats { + return ScheduledJobStats() + } + + override fun newResponse( + request: ScheduledJobsStatsRequest, + responses: MutableList, + failures: MutableList + ): ScheduledJobsStatsResponse { + val scheduledJobEnabled = jobSweeper.isSweepingEnabled() + val scheduledJobIndexExist = scheduledJobIndices.scheduledJobIndexExists() + val indexHealth: ClusterIndexHealth? 
= if (scheduledJobIndexExist) scheduledJobIndices.scheduledJobIndexHealth() else null + + return ScheduledJobsStatsResponse( + clusterService.clusterName, + responses, + failures, + scheduledJobEnabled, + scheduledJobIndexExist, + indexHealth) + } + + override fun nodeOperation(request: ScheduledJobStatusRequest): ScheduledJobStats { + return createScheduledJobStatus(request.request) + } + + private fun createScheduledJobStatus( + scheduledJobsStatusRequest: ScheduledJobsStatsRequest + ): ScheduledJobStats { + val jobSweeperMetrics = jobSweeper.getJobSweeperMetrics() + val jobSchedulerMetrics = jobScheduler.getJobSchedulerMetric() + + val status: ScheduledJobStats.ScheduleStatus = evaluateStatus(jobSchedulerMetrics, jobSweeperMetrics) + return ScheduledJobStats(this.transportService.localNode, + status, + if (scheduledJobsStatusRequest.jobSchedulingMetrics) jobSweeperMetrics else null, + if (scheduledJobsStatusRequest.jobsInfo) jobSchedulerMetrics.toTypedArray() else null) + } + + private fun evaluateStatus( + jobsInfo: List, + jobSweeperMetrics: JobSweeperMetrics + ): ScheduledJobStats.ScheduleStatus { + val allJobsRunningOnTime = jobsInfo.all { it.runningOnTime } + if (allJobsRunningOnTime && jobSweeperMetrics.fullSweepOnTime) { + return ScheduledJobStats.ScheduleStatus.GREEN + } + log.info("Jobs Running on time: $allJobsRunningOnTime, Sweeper on time: ${jobSweeperMetrics.fullSweepOnTime}") + return ScheduledJobStats.ScheduleStatus.RED + } + + class ScheduledJobStatusRequest : BaseNodeRequest { + + lateinit var request: ScheduledJobsStatsRequest + + constructor() : super() + constructor(nodeId: String, request: ScheduledJobsStatsRequest) : super(nodeId) { + this.request = request + } + + @Throws(IOException::class) + override fun readFrom(si: StreamInput) { + super.readFrom(si) + request = ScheduledJobsStatsRequest() + request.readFrom(si) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + request.writeTo(out) + 
} + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Input.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Input.kt new file mode 100644 index 00000000..177baa85 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Input.kt @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.model + +import org.elasticsearch.common.xcontent.ToXContentObject +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParser.Token +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +interface Input : ToXContentObject { + companion object { + + @Throws(IOException::class) + fun parse(xcp: XContentParser): Input { + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation) + ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp::getTokenLocation) + ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + val input = xcp.namedObject(Input::class.java, xcp.currentName(), null) + ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp::getTokenLocation) + return input + } + } + + fun name(): String +} diff --git 
a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Schedule.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Schedule.kt new file mode 100644 index 00000000..938aa02f --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/Schedule.kt @@ -0,0 +1,311 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.model + +import com.cronutils.model.CronType +import com.cronutils.model.definition.CronDefinitionBuilder +import com.cronutils.model.time.ExecutionTime +import com.cronutils.parser.CronParser +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentObject +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.DateTimeException +import java.time.Duration +import java.time.Instant +import java.time.ZoneId +import java.time.ZonedDateTime +import java.time.temporal.ChronoUnit +import java.time.zone.ZoneRulesException + +sealed class Schedule : ToXContentObject { + enum class TYPE { CRON, INTERVAL } + companion object { + const val CRON_FIELD = "cron" + const val EXPRESSION_FIELD = "expression" + const val TIMEZONE_FIELD = 
"timezone"
        const val PERIOD_FIELD = "period"
        const val INTERVAL_FIELD = "interval"
        const val UNIT_FIELD = "unit"

        val cronParser = CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX))

        /**
         * Parses a [Schedule] from XContent. Exactly one schedule type ("cron" or "period") may be
         * present; supplying more than one, or an unknown field, raises [IllegalArgumentException].
         */
        @JvmStatic @Throws(IOException::class)
        fun parse(xcp: XContentParser): Schedule {
            var expression: String? = null
            var timezone: ZoneId? = null
            var interval: Int? = null
            var unit: ChronoUnit? = null
            var schedule: Schedule? = null
            var type: TYPE? = null
            ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation)
            while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
                val fieldname = xcp.currentName()
                xcp.nextToken()
                // If the type field has already been set the customer has provided more than one type of schedule.
                if (type != null) {
                    throw IllegalArgumentException("You can only specify one type of schedule.")
                }
                when (fieldname) {
                    CRON_FIELD -> {
                        type = TYPE.CRON
                        while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
                            val cronFieldName = xcp.currentName()
                            xcp.nextToken()
                            when (cronFieldName) {
                                EXPRESSION_FIELD -> expression = xcp.textOrNull()
                                TIMEZONE_FIELD -> timezone = getTimeZone(xcp.text())
                            }
                        }
                    }
                    PERIOD_FIELD -> {
                        type = TYPE.INTERVAL
                        while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
                            val periodFieldName = xcp.currentName()
                            xcp.nextToken()
                            when (periodFieldName) {
                                INTERVAL_FIELD -> interval = xcp.intValue()
                                // Locale.ROOT keeps the enum lookup stable regardless of the JVM default
                                // locale (e.g. the Turkish dotless-i would break "minutes" -> MINUTES).
                                UNIT_FIELD -> unit = ChronoUnit.valueOf(xcp.text().toUpperCase(java.util.Locale.ROOT))
                            }
                        }
                    }
                    else -> {
                        throw IllegalArgumentException("Invalid field: [$fieldname] found in schedule.")
                    }
                }
            }
            if (type == TYPE.CRON) {
                schedule = CronSchedule(requireNotNull(expression) { "Expression in cron schedule is null." },
                        requireNotNull(timezone) { "Timezone in cron schedule is null." })
            } else if (type == TYPE.INTERVAL) {
                schedule = IntervalSchedule(requireNotNull(interval) { "Interval in period schedule is null." },
                        requireNotNull(unit) { "Unit in period schedule is null." })
            }
            return requireNotNull(schedule) { "Schedule is null." }
        }

        /** Resolves [timeZone] to a [ZoneId], mapping the java.time exceptions to [IllegalArgumentException]. */
        @JvmStatic @Throws(IllegalArgumentException::class)
        private fun getTimeZone(timeZone: String): ZoneId {
            try {
                return ZoneId.of(timeZone)
            } catch (zre: ZoneRulesException) {
                throw IllegalArgumentException("Timezone $timeZone is not supported")
            } catch (dte: DateTimeException) {
                throw IllegalArgumentException("Timezone $timeZone is not supported")
            }
        }
    }

    /**
     * @param enabledTime is used in IntervalSchedule to calculate next time to execute the schedule.
     */
    abstract fun nextTimeToExecute(enabledTime: Instant): Duration?

    /**
     * @param expectedPreviousExecutionTime is the calculated previous execution time that should always be correct,
     * the first time this is called the value passed in is the enabledTime which acts as the expectedPreviousExecutionTime
     */
    abstract fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant?

    /**
     * Returns the start and end time for this schedule starting at the given start time (if provided).
     * If not, the start time is assumed to be the last time the Schedule would have executed (if it's a Cron schedule)
     * or [Instant.now] if it's an interval schedule.
     *
     * If this is a schedule that runs only once this function will return [Instant.now] for both start and end time.
     */
    abstract fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant>

    /**
     * Returns the start and end time for this schedule ending at the given end time (if provided).
     * If not, the end time is assumed to be the next time the Schedule would have executed (if it's a Cron schedule)
     * or [Instant.now] if it's an interval schedule.
     *
     * If this is a schedule that runs only once this function will return [Instant.now] for both start and end time.
     */
    abstract fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant>

    /** @return true when [lastExecutionTime] is consistent with this schedule (or is null, meaning "not run yet"). */
    abstract fun runningOnTime(lastExecutionTime: Instant?): Boolean
}

/**
 * A cron-expression driven schedule evaluated in [timezone].
 *
 * @param testInstant Normally this should not be set; it is only used in unit tests to control time.
 */
data class CronSchedule(
    val expression: String,
    val timezone: ZoneId,
    // visible for testing
    @Transient val testInstant: Instant? = null
) : Schedule() {
    @Transient
    val executionTime: ExecutionTime = ExecutionTime.forCron(cronParser.parse(expression))

    /*
     * @param enabledTime is not used in CronSchedule.
     */
    override fun nextTimeToExecute(enabledTime: Instant): Duration? {
        val zonedDateTime = ZonedDateTime.ofInstant(testInstant ?: Instant.now(), timezone)
        val timeToNextExecution = executionTime.timeToNextExecution(zonedDateTime)
        return timeToNextExecution.orElse(null)
    }

    override fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant? {
        val zonedDateTime = ZonedDateTime.ofInstant(expectedPreviousExecutionTime ?: testInstant ?: Instant.now(), timezone)
        val nextExecution = executionTime.nextExecution(zonedDateTime)
        return nextExecution.orElse(null)?.toInstant()
    }

    override fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant> {
        val realStartTime = if (startTime != null) {
            startTime
        } else {
            // Probably the first time we're running. Try to figure out the last execution time
            val lastExecutionTime = executionTime.lastExecution(ZonedDateTime.now(timezone))
            // This shouldn't happen unless the cron is configured to run only once, which our current cron syntax doesn't support
            if (!lastExecutionTime.isPresent) {
                val currentTime = Instant.now()
                return Pair(currentTime, currentTime)
            }
            lastExecutionTime.get().toInstant()
        }
        val zonedDateTime = ZonedDateTime.ofInstant(realStartTime, timezone)
        val newEndTime = executionTime.nextExecution(zonedDateTime).orElse(null)
        return Pair(realStartTime, newEndTime?.toInstant() ?: realStartTime)
    }

    override fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant> {
        val realEndTime = if (endTime != null) {
            endTime
        } else {
            val nextExecutionTime = executionTime.nextExecution(ZonedDateTime.now(timezone))
            // This shouldn't happen unless the cron is configured to run only once which our current cron syntax doesn't support
            if (!nextExecutionTime.isPresent) {
                val currentTime = Instant.now()
                return Pair(currentTime, currentTime)
            }
            nextExecutionTime.get().toInstant()
        }
        val zonedDateTime = ZonedDateTime.ofInstant(realEndTime, timezone)
        val newStartTime = executionTime.lastExecution(zonedDateTime).orElse(null)
        return Pair(newStartTime?.toInstant() ?: realEndTime, realEndTime)
    }

    override fun runningOnTime(lastExecutionTime: Instant?): Boolean {
        if (lastExecutionTime == null) {
            return true
        }

        val zonedDateTime = ZonedDateTime.ofInstant(testInstant ?: Instant.now(), timezone)
        val expectedExecutionTime = executionTime.lastExecution(zonedDateTime)

        if (!expectedExecutionTime.isPresent) {
            // At this point we know lastExecutionTime is not null, this should never happen.
            // If expected execution time is null, we shouldn't have executed the ScheduledJob.
            return false
        }
        val actualExecutionTime = ZonedDateTime.ofInstant(lastExecutionTime, timezone)

        // "On time" means the actual run landed in the same second as the cron's expected run.
        return ChronoUnit.SECONDS.between(expectedExecutionTime.get(), actualExecutionTime) == 0L
    }

    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
        builder.startObject()
                .startObject(CRON_FIELD)
                .field(EXPRESSION_FIELD, expression)
                .field(TIMEZONE_FIELD, timezone.id)
                .endObject()
                .endObject()
        return builder
    }
}

/**
 * A fixed-interval schedule, e.g. "every 5 MINUTES".
 *
 * @param testInstant Normally this should not be set; it is only used in unit tests to control time.
 */
data class IntervalSchedule(
    val interval: Int,
    val unit: ChronoUnit,
    // visible for testing
    @Transient val testInstant: Instant? = null
) : Schedule() {
    companion object {
        @Transient
        private val SUPPORTED_UNIT = listOf(ChronoUnit.MINUTES, ChronoUnit.HOURS, ChronoUnit.DAYS)
    }

    init {
        if (!SUPPORTED_UNIT.contains(unit)) {
            // Bug fix: the message previously said "Timezone" even though the interval *unit* is being rejected.
            throw IllegalArgumentException("Unit $unit is not supported, expected $SUPPORTED_UNIT")
        }
    }

    @Transient
    private val intervalInMillis = Duration.of(interval.toLong(), unit).toMillis()

    override fun nextTimeToExecute(enabledTime: Instant): Duration? {
        val enabledTimeEpochMillis = enabledTime.toEpochMilli()

        val currentTime = testInstant ?: Instant.now()
        val delta = currentTime.toEpochMilli() - enabledTimeEpochMillis
        // Remainder of the delta time is how much we have already spent waiting.
        // We need to subtract remainder of that time from the interval time to get remaining schedule time to wait.
        val remainingScheduleTime = intervalInMillis - delta.rem(intervalInMillis)
        return Duration.of(remainingScheduleTime, ChronoUnit.MILLIS)
    }

    override fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant? {
        val expectedPreviousExecutionTimeEpochMillis = (expectedPreviousExecutionTime ?: enabledTime).toEpochMilli()
        // We still need to calculate the delta even when using expectedPreviousExecutionTime because the initial value passed in
        // is the enabledTime (which also happens with cluster/node restart)
        val currentTime = testInstant ?: Instant.now()
        val delta = currentTime.toEpochMilli() - expectedPreviousExecutionTimeEpochMillis
        // Remainder of the delta time is how much we have already spent waiting.
        // We need to subtract remainder of that time from the interval time to get remaining schedule time to wait.
        val remainingScheduleTime = intervalInMillis - delta.rem(intervalInMillis)
        return Instant.ofEpochMilli(currentTime.toEpochMilli() + remainingScheduleTime)
    }

    override fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant> {
        val realStartTime = startTime ?: Instant.now()
        val newEndTime = realStartTime.plusMillis(intervalInMillis)
        return Pair(realStartTime, newEndTime)
    }

    override fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant> {
        val realEndTime = endTime ?: Instant.now()
        val newStartTime = realEndTime.minusMillis(intervalInMillis)
        return Pair(newStartTime, realEndTime)
    }

    override fun runningOnTime(lastExecutionTime: Instant?): Boolean {
        if (lastExecutionTime == null) {
            return true
        }

        // Make sure the lastExecutionTime is less than interval time.
        val delta = ChronoUnit.MILLIS.between(lastExecutionTime, testInstant ?: Instant.now())
        return 0 < delta && delta < intervalInMillis
    }

    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
        builder.startObject()
                .startObject(PERIOD_FIELD)
                .field(INTERVAL_FIELD, interval)
                .field(UNIT_FIELD, unit.name)
                .endObject()
                .endObject()
        return builder
    }
}
diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduledJob.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduledJob.kt
new file mode 100644
index 00000000..297df59c
--- /dev/null
+++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduledJob.kt
@@ -0,0 +1,122 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazon.opendistroforelasticsearch.alerting.core.model

import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.NO_ID
import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.NO_VERSION
import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX
import org.elasticsearch.common.xcontent.ToXContent
import org.elasticsearch.common.xcontent.ToXContentObject
import org.elasticsearch.common.xcontent.XContentBuilder
import org.elasticsearch.common.xcontent.XContentParser
import org.elasticsearch.common.xcontent.XContentParser.Token
import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken
import java.io.IOException
import java.time.Instant

/**
 * A job that runs periodically in the ElasticSearch cluster.
 *
 * All implementations of [ScheduledJob]s are stored in the [SCHEDULED_JOBS_INDEX] index and are scheduled in a
 * single global Scheduler running on each node. Each implementation should have its own separate APIs for writing,
 * updating and deleting instances of that job type into the [SCHEDULED_JOBS_INDEX] index. The index is periodically
 * scanned for updates which are then scheduled or unscheduled with the Scheduler.
 *
 * Like all documents in Elasticsearch [ScheduledJob]s also have an [id] and a [version]. Jobs that have not been
 * persisted in the cluster should use the special sentinel values [NO_ID] and [NO_VERSION] for these fields.
 */
interface ScheduledJob : ToXContentObject {

    /** Serializes the job wrapped in its type name, i.e. with the "with_type" param set. */
    fun toXContentWithType(builder: XContentBuilder): XContentBuilder = toXContent(builder, XCONTENT_WITH_TYPE)

    companion object {
        /** The name of the ElasticSearch index in which we store jobs */
        const val SCHEDULED_JOBS_INDEX = ".opendistro-alerting-config"

        /**
         * The mapping type of [ScheduledJob]s in the ES index. Unrelated to [ScheduledJob.type].
         *
         * This should go away starting ES 7. We use "_doc" for future compatibility as described here:
         * https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_schedule_for_removal_of_mapping_types
         */
        const val SCHEDULED_JOB_TYPE = "_doc"

        /** Sentinel id for a job that has not been persisted yet. */
        const val NO_ID = ""

        /** Sentinel version for a job that has not been persisted yet. */
        const val NO_VERSION = 1L

        private val XCONTENT_WITH_TYPE = ToXContent.MapParams(mapOf("with_type" to "true"))

        /**
         * This function parses the job, delegating to the specific subtype parser registered in the [XContentParser.getXContentRegistry]
         * at runtime. Each concrete job subclass is expected to register a parser in this registry.
         * The Job's json representation is expected to be of the form:
         *     { "<job_type>" : { <job fields> } }
         *
         * If the job comes from an Elasticsearch index its [id] and [version] can also be supplied.
         */
        @Throws(IOException::class)
        fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): ScheduledJob {
            // Consume the outer START_OBJECT, the type field name, and the inner START_OBJECT before
            // delegating; the registered named-object parser consumes the inner object's contents.
            ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
            ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp::getTokenLocation)
            ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
            val job = xcp.namedObject(ScheduledJob::class.java, xcp.currentName(), null)
            ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
            return job.fromDocument(id, version)
        }

        /**
         * This function parses the job, but expects the type to be passed in. This is for the specific
         * use case in sweeper where we first want to check if the job is allowed to be swept before
         * trying to fully parse it. If you need to parse a job, you most likely want to use
         * the above parse function.
         */
        @Throws(IOException::class)
        fun parse(xcp: XContentParser, type: String, id: String = NO_ID, version: Long = NO_VERSION): ScheduledJob {
            ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
            val job = xcp.namedObject(ScheduledJob::class.java, type, null)
            ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp::getTokenLocation)
            return job.fromDocument(id, version)
        }
    }

    /** The id of the job in the [SCHEDULED_JOBS_INDEX] or [NO_ID] if not persisted */
    val id: String

    /** The version of the job in the [SCHEDULED_JOBS_INDEX] or [NO_VERSION] if not persisted */
    val version: Long

    /** The name of the job */
    val name: String

    /** The type of the job */
    val type: String

    /** Controls whether the job will be scheduled or not */
    val enabled: Boolean

    /** The schedule for running the job */
    val schedule: Schedule

    /** The last time the job was updated */
    val lastUpdateTime: Instant

    /** The time the job was enabled */
    val enabledTime: Instant?

    /** Copy constructor for persisted jobs */
    fun fromDocument(id: String, version: Long): ScheduledJob
}
diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/SearchInput.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/SearchInput.kt
new file mode 100644
index 00000000..36a4efbc
--- /dev/null
+++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/SearchInput.kt
@@ -0,0 +1,78 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file.
This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazon.opendistroforelasticsearch.alerting.core.model

import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI
import org.elasticsearch.common.CheckedFunction
import org.elasticsearch.common.ParseField
import org.elasticsearch.common.xcontent.NamedXContentRegistry
import org.elasticsearch.common.xcontent.ToXContent
import org.elasticsearch.common.xcontent.XContentBuilder
import org.elasticsearch.common.xcontent.XContentParser
import org.elasticsearch.common.xcontent.XContentParser.Token
import org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken
import org.elasticsearch.search.builder.SearchSourceBuilder
import java.io.IOException

/**
 * A [ScheduledJob] input that runs [query] against [indices].
 */
data class SearchInput(val indices: List<String>, val query: SearchSourceBuilder) : Input {

    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
        return builder.startObject()
                .startObject(SEARCH_FIELD)
                .field(INDICES_FIELD, indices.toTypedArray())
                .field(QUERY_FIELD, query)
                .endObject()
                .endObject()
    }

    override fun name(): String {
        return SEARCH_FIELD
    }

    companion object {
        const val INDICES_FIELD = "indices"
        const val QUERY_FIELD = "query"
        const val SEARCH_FIELD = "search"

        val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField("search"), CheckedFunction { parseInner(it) })

        /**
         * Parses the inner object of a "search" input: an "indices" string array and a "query" search source.
         * Throws [IllegalArgumentException] if no query was supplied.
         */
        @JvmStatic @Throws(IOException::class)
        private fun parseInner(xcp: XContentParser): SearchInput {
            val indices = mutableListOf<String>()
            // Bug fix: this was a `lateinit var`, so the requireNotNull below could never fire with its
            // intended message — reading an uninitialized lateinit throws UninitializedPropertyAccessException
            // first. A nullable var lets requireNotNull report "SearchInput query is null" as designed.
            var searchSourceBuilder: SearchSourceBuilder? = null

            ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp::getTokenLocation)
            while (xcp.nextToken() != Token.END_OBJECT) {
                val fieldName = xcp.currentName()
                xcp.nextToken()
                when (fieldName) {
                    INDICES_FIELD -> {
                        ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp::getTokenLocation)
                        while (xcp.nextToken() != Token.END_ARRAY) {
                            indices.add(xcp.text())
                        }
                    }
                    QUERY_FIELD -> {
                        searchSourceBuilder = ElasticAPI.INSTANCE.parseSearchSource(xcp)
                    }
                }
            }

            return SearchInput(indices,
                    requireNotNull(searchSourceBuilder) { "SearchInput query is null" })
        }
    }
}
diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt
new file mode 100644
index 00000000..62e41c97
--- /dev/null
+++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt
@@ -0,0 +1,103 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.core.resthandler + +import com.amazon.opendistroforelasticsearch.alerting.core.action.node.ScheduledJobsStatsAction +import com.amazon.opendistroforelasticsearch.alerting.core.action.node.ScheduledJobsStatsRequest +import org.elasticsearch.client.node.NodeClient +import org.elasticsearch.common.Strings +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.rest.BaseRestHandler +import org.elasticsearch.rest.RestController +import org.elasticsearch.rest.RestRequest + +import org.elasticsearch.rest.RestRequest.Method.GET +import org.elasticsearch.rest.action.RestActions +import java.util.Locale +import java.util.TreeSet + +/** + * RestScheduledJobStatsHandler is handler for getting ScheduledJob Stats. + */ +class RestScheduledJobStatsHandler(settings: Settings, controller: RestController, private val path: String) : BaseRestHandler(settings) { + init { + controller.registerHandler(GET, "/_opendistro/$path/{nodeId}/stats/", this) + controller.registerHandler(GET, "/_opendistro/$path/{nodeId}/stats/{metric}", this) + + controller.registerHandler(GET, "/_opendistro/$path/stats/", this) + controller.registerHandler(GET, "/_opendistro/$path/stats/{metric}", this) + } + + companion object { + const val JOB_SCHEDULING_METRICS: String = "job_scheduling_metrics" + const val JOBS_INFO: String = "jobs_info" + private val METRICS = mapOf Unit>( + JOB_SCHEDULING_METRICS to { it -> it.jobSchedulingMetrics = true }, + JOBS_INFO to { it -> it.jobsInfo = true } + ) + } + + override fun getName(): String { + return "${path}_jobs_stats" + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val scheduledJobNodesStatsRequest = getRequest(request) + return RestChannelConsumer { channel -> + client.execute( + ScheduledJobsStatsAction.INSTANCE, + scheduledJobNodesStatsRequest, + RestActions.NodesResponseRestListener(channel) + ) + } + } + + private fun 
getRequest(request: RestRequest): ScheduledJobsStatsRequest { + val nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")) + val metrics = Strings.tokenizeByCommaToSet(request.param("metric")) + val scheduledJobsStatsRequest = ScheduledJobsStatsRequest(nodesIds) + scheduledJobsStatsRequest.timeout(request.param("timeout")) + + if (metrics.isEmpty()) { + return scheduledJobsStatsRequest + } else if (metrics.size == 1 && metrics.contains("_all")) { + scheduledJobsStatsRequest.all() + } else if (metrics.contains("_all")) { + throw IllegalArgumentException( + String.format(Locale.ROOT, + "request [%s] contains _all and individual metrics [%s]", + request.path(), + request.param("metric"))) + } else { + // use a sorted set so the unrecognized parameters appear in a reliable sorted order + scheduledJobsStatsRequest.clear() + val invalidMetrics = TreeSet() + for (metric in metrics) { + val handler = METRICS[metric] + if (handler != null) { + handler.invoke(scheduledJobsStatsRequest) + } else { + invalidMetrics.add(metric) + } + } + + if (!invalidMetrics.isEmpty()) { + throw IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keys, "metric")) + } + } + return scheduledJobsStatsRequest + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobScheduler.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobScheduler.kt new file mode 100644 index 00000000..60e44025 --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobScheduler.kt @@ -0,0 +1,235 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazon.opendistroforelasticsearch.alerting.core.schedule

import com.amazon.opendistroforelasticsearch.alerting.core.JobRunner
import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob
import org.elasticsearch.common.logging.Loggers
import org.elasticsearch.common.unit.TimeValue
import org.elasticsearch.threadpool.ThreadPool
import java.time.Duration
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ScheduledFuture
import java.util.concurrent.TimeUnit
import java.util.stream.Collectors

/**
 * JobScheduler is a class for scheduling and descheduling ScheduleJobs. This class keeps list of ScheduledJob Ids that are currently scheduled.
 *
 * JobScheduler is unaware of the ScheduledJob version and it is up to callers to ensure that the older version of ScheduledJob to be descheduled and schedule the new version.
 */
class JobScheduler(private val threadPool: ThreadPool, private val jobRunner: JobRunner) {
    private val logger = Loggers.getLogger(JobScheduler::class.java)

    /**
     * Map of ScheduledJobName to Info of the ScheduledJob.
     */
    private val scheduledJobIdToInfo = ConcurrentHashMap<String, ScheduledJobInfo>()

    /**
     * Schedules the jobs in [jobsToSchedule] for execution.
     *
     * @return List of jobs that could not be scheduled
     */
    fun schedule(vararg jobsToSchedule: ScheduledJob): List<ScheduledJob> {
        return jobsToSchedule.filter {
            !this.schedule(it)
        }
    }

    /**
     * Schedules a single [scheduledJob]
     *
     * The [schedule] does not check for new version of the ScheduledJob.
     * The caller should be aware of the update that happened in [ScheduledJob] and must first call [deschedule] if the Job version is updated and then followed by [schedule]
     *
     * [schedule] is considered successfully scheduled when
     * 1. Cron expression is out of Scheduled. eg. past year 2016.
     * 2. If the schedule already exists. This is to keep the function idempotent.
     * 3. we are able to schedule the job in the [ThreadPool.schedule]
     *
     * [schedule] is considered unsuccessfully schedule when
     * 1. Schedule is disabled.
     * 2. In rare race condition where scheduledJob is already marked [ScheduledJobInfo.descheduled] true at the time of making [ThreadPool.schedule]
     * 3. any unexpected failures.
     *
     * @return true if the ScheduledJob is scheduled successfully;
     * false otherwise.
     */
    fun schedule(scheduledJob: ScheduledJob): Boolean {
        logger.info("Scheduling jobId : ${scheduledJob.id}, name: ${scheduledJob.name}")

        if (!scheduledJob.enabled) {
            // ensure that the ScheduledJob is not enabled. The caller should be also checking this before calling this function.
            return false
        }

        val scheduledJobInfo = scheduledJobIdToInfo.getOrPut(scheduledJob.id) {
            ScheduledJobInfo(scheduledJob.id, scheduledJob)
        }
        if (scheduledJobInfo.scheduledFuture != null) {
            // This means that the given ScheduledJob already has schedule running. We should not schedule any more.
            return true
        }

        // Start the first schedule.
        return this.reschedule(scheduledJob, scheduledJobInfo)
    }

    /**
     * Deschedules the jobs given ScheduledJob [ids].
     *
     * caller should retry [deschedule] that failed.
     *
     * @return List of job ids failed to deschedule.
     */
    fun deschedule(ids: Collection<String>): List<String> {
        return ids.filter {
            !this.deschedule(it)
        }.also {
            if (it.isNotEmpty()) {
                logger.error("Unable to deschedule jobs $it")
            }
        }
    }

    /**
     * Mark the scheduledJob as descheduled and try to cancel any future schedule for given scheduledJob id.
     *
     * [deschedule] is considered successful when
     * 1. ScheduledJob id does not exist.
     * 2. ScheduledJob is complete.
     * 3. ScheduledJob is not complete and is successfully cancelled.
     *
     * Caller should retry if ScheduledJob [deschedule] fails.
     *
     * @return true if job is successfully descheduled;
     * false otherwise.
     */
    fun deschedule(id: String): Boolean {
        val scheduledJobInfo = scheduledJobIdToInfo[id]
        if (scheduledJobInfo == null) {
            logger.info("JobId $id does not exist.")
            return true
        } else {
            logger.info("Descheduling jobId : $id")
            // Mark descheduled first so an in-flight runnable (see reschedule) will observe it and skip.
            scheduledJobInfo.descheduled = true
            scheduledJobInfo.actualPreviousExecutionTime = null
            scheduledJobInfo.expectedNextExecutionTime = null
            var result = true
            val scheduledFuture = scheduledJobInfo.scheduledFuture

            if (scheduledFuture != null && !scheduledFuture.isDone) {
                result = scheduledFuture.cancel(false)
            }

            if (result) {
                // If we have successfully descheduled the job, remove from the info map.
                scheduledJobIdToInfo.remove(scheduledJobInfo.scheduledJobId, scheduledJobInfo)
            }
            return result
        }
    }

    /**
     * @return list of jobIds that are scheduled.
     */
    fun scheduledJobs(): Set<String> {
        return scheduledJobIdToInfo.keys
    }

    /**
     * Computes the next expected execution time and schedules a runnable on the thread pool that will
     * run the job and then reschedule itself for the following interval.
     */
    private fun reschedule(scheduleJob: ScheduledJob, scheduledJobInfo: ScheduledJobInfo): Boolean {
        if (scheduleJob.enabledTime == null) {
            logger.info("${scheduleJob.name} there is no enabled time. This job should never have been scheduled.")
            return false
        }
        scheduledJobInfo.expectedNextExecutionTime = scheduleJob.schedule.getExpectedNextExecutionTime(
                scheduleJob.enabledTime!!, scheduledJobInfo.expectedNextExecutionTime)

        // Validate if there is next execution that needs to happen.
        // e.g cron job that is expected to run in 30th of Feb (which doesn't exist). "0/5 * 30 2 *"
        if (scheduledJobInfo.expectedNextExecutionTime == null) {
            logger.info("${scheduleJob.name} there is no next execution time.")
            return true
        }

        val duration = Duration.between(Instant.now(), scheduledJobInfo.expectedNextExecutionTime)

        // Create anonymous runnable.
        val runnable = Runnable {
            // Check again if the scheduled job is marked descheduled.
            if (scheduledJobInfo.descheduled) {
                return@Runnable // skip running job if job is marked descheduled.
            }

            // Order of operations inside here matter, we specifically call getPeriodEndingAt before reschedule because
            // reschedule will update expectedNextExecutionTime to the next one which would throw off the startTime/endTime
            val (startTime, endTime) = scheduleJob.schedule.getPeriodEndingAt(scheduledJobInfo.expectedNextExecutionTime)
            scheduledJobInfo.actualPreviousExecutionTime = Instant.now()

            this.reschedule(scheduleJob, scheduledJobInfo)

            jobRunner.runJob(scheduleJob, startTime, endTime)
        }

        // Check descheduled flag as close as possible before we actually schedule a job.
        // This way we can minimize race conditions.
        if (scheduledJobInfo.descheduled) {
            // Do not reschedule if schedule has been marked descheduled.
            return false
        }

        // Finally schedule the job in the ThreadPool with next time to execute.
        val scheduledFuture = threadPool.schedule(TimeValue(duration.toNanos(), TimeUnit.NANOSECONDS), ThreadPool.Names.SAME, runnable)
        scheduledJobInfo.scheduledFuture = scheduledFuture

        return true
    }

    /** Snapshots per-job scheduling metrics (last run time and on-time status) for the stats API. */
    fun getJobSchedulerMetric(): List<JobSchedulerMetrics> {
        return scheduledJobIdToInfo.entries.stream()
                .map { entry ->
                    JobSchedulerMetrics(entry.value.scheduledJobId,
                            entry.value.actualPreviousExecutionTime?.toEpochMilli(),
                            entry.value.scheduledJob.schedule.runningOnTime(entry.value.actualPreviousExecutionTime))
                }
                .collect(Collectors.toList())
    }

    /** Forwards an index event for [job] to the [jobRunner]. */
    fun postIndex(job: ScheduledJob) {
        jobRunner.postIndex(job)
    }

    /** Forwards a delete event for [jobId] to the [jobRunner]. */
    fun postDelete(jobId: String) {
        jobRunner.postDelete(jobId)
    }

    /**
     * ScheduledJobInfo which we can use to check if the job should be descheduled.
     * Some Idea for more use of this class is
     * 1. Total number of runs.
     * 2. Tracking of number of failed runs (helps to control error handling.)
     */
    private data class ScheduledJobInfo(
        val scheduledJobId: String,
        val scheduledJob: ScheduledJob,
        var descheduled: Boolean = false,
        var actualPreviousExecutionTime: Instant? = null,
        var expectedNextExecutionTime: Instant? = null,
        var scheduledFuture: ScheduledFuture<*>? = null
    )
}
diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerMetrics.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerMetrics.kt
new file mode 100644
index 00000000..6e8d7305
--- /dev/null
+++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerMetrics.kt
@@ -0,0 +1,56 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.schedule + +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import org.elasticsearch.common.io.stream.StreamInput +import org.elasticsearch.common.io.stream.StreamOutput +import org.elasticsearch.common.io.stream.Writeable +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.ToXContentFragment +import org.elasticsearch.common.xcontent.XContentBuilder +import java.time.Instant + +class JobSchedulerMetrics : ToXContentFragment, Writeable { + val scheduledJobId: String + val lastExecutionTime: Long? 
+ val runningOnTime: Boolean + + constructor(scheduledJobId: String, lastExecutionTime: Long?, runningOnTime: Boolean) { + this.scheduledJobId = scheduledJobId + this.lastExecutionTime = lastExecutionTime + this.runningOnTime = runningOnTime + } + + constructor(si: StreamInput) { + scheduledJobId = si.readString() + lastExecutionTime = si.readOptionalLong() + runningOnTime = si.readBoolean() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(scheduledJobId) + out.writeOptionalLong(lastExecutionTime) + out.writeBoolean(runningOnTime) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (lastExecutionTime != null) + ElasticAPI.INSTANCE.timeField(builder, "last_execution_time", Instant.ofEpochMilli(lastExecutionTime)) + builder.field("running_on_time", runningOnTime) + return builder + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/settings/ScheduledJobSettings.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/settings/ScheduledJobSettings.kt new file mode 100644 index 00000000..c29b5bdc --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/settings/ScheduledJobSettings.kt @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.core.settings + +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import org.elasticsearch.common.settings.Setting +import org.elasticsearch.common.unit.TimeValue + +/** + * settings used for [ScheduledJob]'s. These include back off settings, retry counts, timeouts etc... + */ +class ScheduledJobSettings { + + companion object { + val SWEEPER_ENABLED = Setting.boolSetting( + "opendistro.scheduled_jobs.enabled", + true, + Setting.Property.NodeScope, Setting.Property.Dynamic) + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.request_timeout", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope, Setting.Property.Dynamic) + + val SWEEP_BACKOFF_MILLIS = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.sweeper.backoff_millis", + TimeValue.timeValueMillis(50), + Setting.Property.NodeScope, Setting.Property.Dynamic) + + val SWEEP_BACKOFF_RETRY_COUNT = Setting.intSetting( + "opendistro.scheduled_jobs.retry_count", + 3, + Setting.Property.NodeScope, Setting.Property.Dynamic) + + val SWEEP_PERIOD = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.sweeper.period", + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope, Setting.Property.Dynamic) + + val SWEEP_PAGE_SIZE = Setting.intSetting( + "opendistro.scheduled_jobs.sweeper.page_size", + 100, + Setting.Property.NodeScope, Setting.Property.Dynamic) + } +} diff --git a/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticExtensions.kt b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticExtensions.kt new file mode 100644 index 00000000..3a04752e --- /dev/null +++ b/core/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticExtensions.kt @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
package com.amazon.opendistroforelasticsearch.alerting.elasticapi

import org.elasticsearch.ElasticsearchException
import org.elasticsearch.action.bulk.BackoffPolicy
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.action.search.ShardSearchFailure
import org.elasticsearch.common.xcontent.ToXContent
import org.elasticsearch.common.xcontent.XContentBuilder
import org.elasticsearch.common.xcontent.XContentHelper
import org.elasticsearch.common.xcontent.XContentParser
import org.elasticsearch.common.xcontent.XContentParserUtils
import org.elasticsearch.common.xcontent.XContentType
import org.elasticsearch.rest.RestStatus.BAD_GATEWAY
import org.elasticsearch.rest.RestStatus.GATEWAY_TIMEOUT
import org.elasticsearch.rest.RestStatus.SERVICE_UNAVAILABLE
import java.time.Instant

/** Convert an object to maps and lists representation */
fun ToXContent.convertToMap(): Map<String, Any> {
    val bytesReference = XContentHelper.toXContent(this, XContentType.JSON, false)
    return XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2()
}

/**
 * Backs off and retries a lambda that makes a request. This should not be called on any of the [standard][ThreadPool]
 * executors since those executors are not meant to be blocked by sleeping.
 *
 * @param block the request to run (and re-run on retriable failure)
 * @return the result of the first successful invocation of [block]
 * @throws ElasticsearchException the last failure, once retries are exhausted or the failure is not retriable
 */
fun <T> BackoffPolicy.retry(block: () -> T): T {
    val iter = iterator()
    do {
        try {
            return block()
        } catch (e: ElasticsearchException) {
            // Only sleep-and-retry while the backoff policy has attempts left and the error is transient.
            if (iter.hasNext() && e.isRetriable()) {
                Thread.sleep(iter.next().millis)
            } else {
                throw e
            }
        }
    } while (true)
}

/**
 * Retries on 502, 503 and 504 per elastic client's behavior: https://github.com/elastic/elasticsearch-net/issues/2061
 * 429 must be retried manually as it's not clear if it's ok to retry for requests other than Bulk requests.
 */
fun ElasticsearchException.isRetriable(): Boolean {
    return (status() in listOf(BAD_GATEWAY, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT))
}

/** Returns the first shard-level search failure, or null if all shards succeeded. */
fun SearchResponse.firstFailureOrNull(): ShardSearchFailure? {
    return shardFailures?.getOrNull(0)
}

/** Parses the current token as an epoch-millis [Instant]; returns null for an explicit JSON null. */
fun XContentParser.instant(): Instant? {
    return when {
        currentToken() == XContentParser.Token.VALUE_NULL -> null
        currentToken().isValue -> Instant.ofEpochMilli(longValue())
        else -> {
            XContentParserUtils.throwUnknownToken(currentToken(), tokenLocation)
            null // unreachable
        }
    }
}

/** Writes [instant] as a time field named [name], or an explicit null field when [instant] is null. */
fun XContentBuilder.optionalTimeField(name: String, instant: Instant?): XContentBuilder {
    if (instant == null) {
        return nullField(name)
    }
    return ElasticAPI.INSTANCE.timeField(this, name, instant)
}

/**
 * Extension function for ES 6.3 that duplicates the ES 6.2 XContentBuilder.string() method. On 6.2 this method shadows
 * the existing [XContentBuilder.string] method and so is not invoked.
+ */ +@Suppress("EXTENSION_SHADOWED_BY_MEMBER") +fun XContentBuilder.string(): String = ElasticAPI.INSTANCE.builderToBytesRef(this).utf8ToString() diff --git a/core/src/main/resources/mappings/scheduled-jobs.json b/core/src/main/resources/mappings/scheduled-jobs.json new file mode 100644 index 00000000..a9a2dda9 --- /dev/null +++ b/core/src/main/resources/mappings/scheduled-jobs.json @@ -0,0 +1,216 @@ +{ + "_doc": { + "properties": { + "monitor": { + "dynamic": "false", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "type": { + "type": "keyword" + }, + "enabled": { + "type": "boolean" + }, + "enabled_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "schedule": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + }, + "cron": { + "properties": { + "expression": { + "type": "text" + }, + "timezone": { + "type": "keyword" + } + } + } + } + }, + "inputs": { + "type": "nested", + "properties": { + "search": { + "properties": { + "indices": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query": { + "type": "object", + "enabled": false + } + } + } + } + }, + "triggers": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "min_time_between_executions": { + "type": "integer" + }, + "condition": { + "type": "object", + "enabled": false + }, + "actions": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "destination_id": { + "type": "keyword" + }, + "subject_template": { + "type": "object", + "enabled": false + }, + 
"message_template": { + "type": "object", + "enabled": false + } + } + } + } + }, + "ui_metadata": { + "type": "object", + "enabled": false + } + } + }, + "destination": { + "dynamic": "false", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "type": { + "type": "keyword" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "chime": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "slack": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "custom_webhook": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "scheme": { + "type": "keyword" + }, + "host": { + "type": "text" + }, + "port": { + "type": "integer" + }, + "path": { + "type": "keyword" + }, + "query_params": { + "type": "object", + "enabled": false + }, + "header_params": { + "type": "object", + "enabled": false + }, + "username": { + "type": "text" + }, + "password": { + "type": "text" + } + } + } + } + } + } + } +} diff --git a/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/XContentTests.kt b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/XContentTests.kt new file mode 100644 index 00000000..141e3241 --- /dev/null +++ b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/XContentTests.kt @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core + +import com.amazon.opendistroforelasticsearch.alerting.core.model.Input +import com.amazon.opendistroforelasticsearch.alerting.core.model.SearchInput +import com.amazon.opendistroforelasticsearch.alerting.core.model.XContentTestBase +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.index.query.QueryBuilders +import org.elasticsearch.search.builder.SearchSourceBuilder +import kotlin.test.Test +import kotlin.test.assertEquals + +class XContentTests : XContentTestBase { + + @Test + fun `test input parsing`() { + val input = randomInput() + + val inputString = input.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() + val parsedInput = Input.parse(parser(inputString)) + + assertEquals(input, parsedInput, "Round tripping input doesn't work") + } + + private fun randomInput(): Input { + return SearchInput(indices = listOf("foo", "bar"), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + } +} diff --git a/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/MockScheduledJob.kt b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/MockScheduledJob.kt new file mode 100644 index 00000000..41f12581 --- /dev/null +++ b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/MockScheduledJob.kt @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.model + +import org.elasticsearch.common.xcontent.ToXContent +import org.elasticsearch.common.xcontent.XContentBuilder +import java.time.Instant + +class MockScheduledJob( + override val id: String, + override val version: Long, + override val name: String, + override val type: String, + override val enabled: Boolean, + override val schedule: Schedule, + override var lastUpdateTime: Instant, + override val enabledTime: Instant? +) : ScheduledJob { + override fun fromDocument(id: String, version: Long): ScheduledJob { + TODO("not implemented") + } + + override fun toXContent(builder: XContentBuilder?, params: ToXContent.Params?): XContentBuilder { + TODO("not implemented") + } +} diff --git a/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduleTest.kt b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduleTest.kt new file mode 100644 index 00000000..8a3572ba --- /dev/null +++ b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/model/ScheduleTest.kt @@ -0,0 +1,340 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
package com.amazon.opendistroforelasticsearch.alerting.core.model

import com.amazon.opendistroforelasticsearch.alerting.elasticapi.string
import org.elasticsearch.common.xcontent.ToXContent
import java.time.Instant
import java.time.ZoneId
import java.time.ZonedDateTime
import java.time.temporal.ChronoUnit
import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import kotlin.test.assertFalse
import kotlin.test.assertNotNull
import kotlin.test.assertTrue

/**
 * Tests for [CronSchedule] and [IntervalSchedule]: next-execution-time math (including
 * time zones and restarts), XContent round trips, invalid-input rejection, and the
 * period start/end helpers. Fixed epoch instants are used so results are deterministic.
 */
class ScheduleTest : XContentTestBase {
    @Test
    fun `test time zone conversion`() {
        val cronExpression = "31 * * * *" // Run at minute 31.
        // This is 2018-09-27 20:00:58 GMT which will in conversion lead to 30min 58 seconds IST
        val testInstance = Instant.ofEpochSecond(1538164858L)

        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Kolkata"), testInstance)
        val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now())
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 2L, "Execute time should be 2 seconds")
    }

    @Test
    fun `test time zone`() {
        val cronExpression = "0 11 * * 3" // Run at 11:00 on Wednesday.
        // This is 2018-09-26 01:59:58 GMT which will in conversion lead to Wednesday 10:59:58 JST
        val testInstance = Instant.ofEpochSecond(1537927198L)

        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Tokyo"), testInstance)
        val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now())
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 2L, "Execute time should be 2 seconds")
    }

    @Test
    fun `test cron calculates next time to execute after restart`() {
        val cronExpression = "* * * * *"
        // This is 2018-09-26 01:59:58 GMT
        val testInstance = Instant.ofEpochSecond(1537927198L)
        // This enabled time represents GMT: Wednesday, September 19, 2018 3:19:51 AM
        val enabledTimeInstance = Instant.ofEpochSecond(1537327191)

        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("America/Los_Angeles"), testInstance)
        // The nextTimeToExecute should be the minute after the test instance, not enabledTimeInstance, replicating a cluster restart
        val nextTimeToExecute = cronSchedule.getExpectedNextExecutionTime(enabledTimeInstance, null)
        assertNotNull(nextTimeToExecute, "There should be next execute time")
        assertEquals(testInstance.plusSeconds(2L), nextTimeToExecute,
            "nextTimeToExecute should be 2 seconds after test instance")
    }

    @Test
    fun `test cron calculates next time to execute using cached previous time`() {
        val cronExpression = "* * * * *"
        // This is 2018-09-26 01:59:58 GMT
        val previousExecutionTimeInstance = Instant.ofEpochSecond(1537927198L)
        // This enabled time represents GMT: Wednesday, September 19, 2018 3:19:51 AM
        val enabledTimeInstance = Instant.ofEpochSecond(1537327191)

        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("America/Los_Angeles"))
        // The nextTimeToExecute should be the minute after the previous execution time instance, not enabledTimeInstance
        val nextTimeToExecute =
            cronSchedule.getExpectedNextExecutionTime(enabledTimeInstance, previousExecutionTimeInstance)
        assertNotNull(nextTimeToExecute, "There should be next execute time")
        assertEquals(previousExecutionTimeInstance.plusSeconds(2L), nextTimeToExecute,
            "nextTimeToExecute should be 2 seconds after test instance")
    }

    @Test
    fun `test interval calculates next time to execute using enabled time`() {
        // This enabled time represents 2018-09-26 01:59:58 GMT
        val enabledTimeInstance = Instant.ofEpochSecond(1537927138L)
        // This is 2018-09-26 01:59:59 GMT, which is 61 seconds after enabledTime
        val testInstance = Instant.ofEpochSecond(1537927199L)

        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance)

        // The nextTimeToExecute should be 120 seconds after the enabled time
        val nextTimeToExecute = intervalSchedule.getExpectedNextExecutionTime(enabledTimeInstance, null)
        assertNotNull(nextTimeToExecute, "There should be next execute time")
        assertEquals(enabledTimeInstance.plusSeconds(120L), nextTimeToExecute,
            "nextTimeToExecute should be 120 seconds seconds after enabled time")
    }

    @Test
    fun `test interval calculates next time to execute using cached previous time`() {
        // This is 2018-09-26 01:59:58 GMT
        val previousExecutionTimeInstance = Instant.ofEpochSecond(1537927198L)
        // This is 2018-09-26 02:00:00 GMT
        val testInstance = Instant.ofEpochSecond(1537927200L)
        // This enabled time represents 2018-09-26 01:58:58 GMT
        val enabledTimeInstance = Instant.ofEpochSecond(1537927138L)

        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance)

        // The nextTimeToExecute should be the minute after the previous execution time instance
        val nextTimeToExecute = intervalSchedule.getExpectedNextExecutionTime(enabledTimeInstance, previousExecutionTimeInstance)
        assertNotNull(nextTimeToExecute, "There should be next execute time")
        assertEquals(previousExecutionTimeInstance.plusSeconds(60L), nextTimeToExecute,
            "nextTimeToExecute should be 60 seconds after previous execution time")
    }

    @Test
    fun `test cron schedule round trip`() {
        val cronExpression = "0 * * * *"
        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Tokyo"))

        val scheduleString = cronSchedule.toXContent(builder(), ToXContent.EMPTY_PARAMS).string()
        val parsedSchedule = Schedule.parse(parser(scheduleString))

        assertTrue(parsedSchedule is CronSchedule, "Parsed schedule is not Cron Scheduled Type.")
        assertEquals(cronSchedule, parsedSchedule, "Round tripping Cron Schedule doesn't work")
    }

    @Test
    fun `test interval schedule round trip`() {
        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES)

        val scheduleString = intervalSchedule.toXContent(builder(), ToXContent.EMPTY_PARAMS).string()
        val parsedSchedule = Schedule.parse(parser(scheduleString))
        assertTrue(parsedSchedule is IntervalSchedule, "Parsed schedule is not Interval Scheduled Type.")
        assertEquals(intervalSchedule, parsedSchedule, "Round tripping Interval Schedule doesn't work")
    }

    @Test
    fun `test cron invalid missing timezone`() {
        val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test cron invalid timezone rule`() {
        val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"Going/Nowhere\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test cron invalid timezone offset`() {
        val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"+++9\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test invalid type`() {
        val scheduleString = "{\"foobarzzz\":{\"expression\":\"0 * * * *\",\"timezone\":\"+++9\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test two types`() {
        val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"Asia/Tokyo\"}, " +
            "\"period\":{\"interval\":\"1\",\"unit\":\"Minutes\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test invalid cron expression`() {
        val scheduleString = "{\"cron\":{\"expression\":\"5 * 1 * * *\",\"timezone\":\"Asia/Tokyo\"}}"
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            Schedule.parse(parser(scheduleString))
        }
    }

    @Test
    fun `test interval period starting at`() {
        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES)

        val (periodStartTime, periodEndTime) = intervalSchedule.getPeriodStartingAt(null)

        assertEquals(periodStartTime, periodEndTime.minus(1, ChronoUnit.MINUTES), "Period didn't match interval")

        val startTime = Instant.now()
        // Kotlin has destructuring declarations but no destructuring assignments? Gee, thanks...
        val (periodStartTime2, _) = intervalSchedule.getPeriodStartingAt(startTime)
        assertEquals(startTime, periodStartTime2, "Periods doesn't start at provided start time")
    }

    @Test
    fun `test interval period ending at`() {
        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES)

        val (periodStartTime, periodEndTime) = intervalSchedule.getPeriodEndingAt(null)

        assertEquals(periodStartTime, periodEndTime.minus(1, ChronoUnit.MINUTES), "Period didn't match interval")

        val endTime = Instant.now()
        // destructuring declarations but no destructuring assignments? Gee, thanks... https://youtrack.jetbrains.com/issue/KT-11362
        val (_, periodEndTime2) = intervalSchedule.getPeriodEndingAt(endTime)
        assertEquals(endTime, periodEndTime2, "Periods doesn't end at provided end time")
    }

    @Test
    fun `test cron period starting at`() {
        val cronSchedule = CronSchedule("0 * * * *", ZoneId.of("Asia/Tokyo"))

        val (startTime1, endTime) = cronSchedule.getPeriodStartingAt(null)
        assertTrue(startTime1 <= Instant.now(), "startTime is in future; should be the last execution time")
        assertTrue(cronSchedule.executionTime.isMatch(ZonedDateTime.ofInstant(endTime, ZoneId.of("Asia/Tokyo"))))

        val (startTime, _) = cronSchedule.getPeriodStartingAt(endTime)
        assertEquals(startTime, endTime, "Subsequent period doesn't start at provided end time")
    }

    @Test
    fun `test cron period ending at`() {
        val cronSchedule = CronSchedule("0 * * * *", ZoneId.of("Asia/Tokyo"))

        val (startTime, endTime1) = cronSchedule.getPeriodEndingAt(null)
        assertTrue(endTime1 >= Instant.now(), "endTime is in past; should be the next execution time")
        assertTrue(cronSchedule.executionTime.isMatch(ZonedDateTime.ofInstant(startTime, ZoneId.of("Asia/Tokyo"))))

        val (_, endTime2) = cronSchedule.getPeriodEndingAt(startTime)
        assertEquals(endTime2, startTime, "Previous period doesn't end at provided start time")
    }

    @Test
    fun `cron job not running on time`() {
        val cronSchedule = createTestCronSchedule()

        val lastExecutionTime = 1539715560L
        assertFalse(cronSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime)))
    }

    @Test
    fun `cron job running on time`() {
        val cronSchedule = createTestCronSchedule()

        val lastExecutionTime = 1539715620L
        assertTrue(cronSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime)))
    }

    @Test
    fun `period job running exactly at interval`() {
        val testInstance = Instant.ofEpochSecond(1539715678L)
        val enabledTime = Instant.ofEpochSecond(1539615178L)
        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance)

        val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime)
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 60L, "Expected 60 seconds but was ${nextTimeToExecute.seconds}")
    }

    @Test
    fun `period job 3 minutes`() {
        val testInstance = Instant.ofEpochSecond(1539615226L)
        val enabledTime = Instant.ofEpochSecond(1539615144L)
        val intervalSchedule = IntervalSchedule(3, ChronoUnit.MINUTES, testInstance)

        val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime)
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 98L, "Expected 98 seconds but was ${nextTimeToExecute.seconds}")
    }

    @Test
    fun `period job running on time`() {
        val intervalSchedule = createTestIntervalSchedule()

        val lastExecutionTime = 1539715620L
        assertTrue(intervalSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime)))
    }

    @Test
    fun `period job not running on time`() {
        val intervalSchedule = createTestIntervalSchedule()

        val lastExecutionTime = 1539715560L
        assertFalse(intervalSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime)))
    }

    @Test
    fun `period job test null last execution time`() {
        val intervalSchedule = createTestIntervalSchedule()

        assertTrue(intervalSchedule.runningOnTime(null))
    }

    /** Builds an interval schedule anchored at a fixed instant and sanity-checks its next execution delay. */
    private fun createTestIntervalSchedule(): IntervalSchedule {
        val testInstance = Instant.ofEpochSecond(1539715678L)
        val enabledTime = Instant.ofEpochSecond(1539615146L)
        val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance)

        val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime)
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 28L, "Expected 28 seconds but was ${nextTimeToExecute.seconds}")

        return intervalSchedule
    }

    /** Builds an every-minute cron schedule anchored at a fixed instant and sanity-checks its next execution delay. */
    private fun createTestCronSchedule(): CronSchedule {
        val cronExpression = "* * * * *"
        val testInstance = Instant.ofEpochSecond(1539715678L)

        val cronSchedule = CronSchedule(cronExpression, ZoneId.of("UTC"), testInstance)
        val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now())
        assertNotNull(nextTimeToExecute, "There should be next execute time.")
        assertTrue(nextTimeToExecute!!.seconds == 2L, "Execute time should be 2 seconds")

        return cronSchedule
    }

    @Test
    fun `test invalid interval units`() {
        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            IntervalSchedule(1, ChronoUnit.SECONDS)
        }

        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
            IntervalSchedule(1, ChronoUnit.MONTHS)
        }
    }
}
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.core.model + +import com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.search.SearchModule + +interface XContentTestBase { + fun builder(): XContentBuilder { + return XContentBuilder.builder(XContentType.JSON.xContent()) + } + + fun parser(xc: String): XContentParser { + val parser = ElasticAPI.INSTANCE.jsonParser(xContentRegistry(), xc) + parser.nextToken() + return parser + } + + fun xContentRegistry(): NamedXContentRegistry { + return NamedXContentRegistry(listOf(SearchInput.XCONTENT_REGISTRY) + + SearchModule(Settings.EMPTY, false, emptyList()).namedXContents) + } +} diff --git a/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerTest.kt b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerTest.kt new file mode 100644 index 00000000..0b998c80 --- /dev/null +++ b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/JobSchedulerTest.kt @@ -0,0 +1,189 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.alerting.core.schedule

import com.amazon.opendistroforelasticsearch.alerting.core.model.CronSchedule
import com.amazon.opendistroforelasticsearch.alerting.core.model.IntervalSchedule
import com.amazon.opendistroforelasticsearch.alerting.core.model.MockScheduledJob
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.threadpool.ThreadPool
import org.junit.After
import org.junit.Before
import java.time.Instant
import java.time.ZoneId
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit
import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertFalse
import kotlin.test.assertTrue

/**
 * Unit tests for [JobScheduler] covering scheduling, descheduling, disabled jobs,
 * jobs with no enabled time and multi-job scheduling. A single [ThreadPool] is
 * created per test and terminated afterwards so tests do not leak scheduler threads.
 */
class JobSchedulerTest {

    private val testSettings: Settings = Settings.builder().put("node.name", "node-0").build()
    private lateinit var testThreadPool: ThreadPool
    private lateinit var jobRunner: MockJobRunner
    private lateinit var jobScheduler: JobScheduler

    @Before
    fun `setup`() {
        testThreadPool = ThreadPool(testSettings)
        jobRunner = MockJobRunner()
        jobScheduler = JobScheduler(testThreadPool, jobRunner)
    }

    @After
    fun `teardown`() {
        // ThreadPool spawns real scheduler threads; terminate so each test cleans up after itself.
        ThreadPool.terminate(testThreadPool, 10, TimeUnit.SECONDS)
    }

    @Test
    fun `schedule and deschedule`() {
        val mockScheduledJob = MockScheduledJob(
            "mockScheduledJob-id",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            IntervalSchedule(1, ChronoUnit.MINUTES),
            Instant.now(),
            Instant.now())

        assertTrue(jobScheduler.schedule(mockScheduledJob))

        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")
        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
    }

    @Test
    fun `schedule cron past year`() {
        // This is to run cron in Feb 30 which we should never run.
        val cronExpression = "0/5 * 30 2 *"
        val jobRunner = MockJobRunner()
        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
        val mockScheduledJob = MockScheduledJob(
            "mockScheduledJob-id",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())

        assertTrue(jobScheduler.schedule(mockScheduledJob))
        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")

        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")

        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
    }

    @Test
    fun `schedule disabled`() {
        val cronExpression = "0/5 * * * *"
        val jobRunner = MockJobRunner()
        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
        val mockScheduledJob = MockScheduledJob(
            "mockScheduledJob-id",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            false,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())

        assertFalse(jobScheduler.schedule(mockScheduledJob), "We should return false if we try to schedule disabled schedule.")
        assertEquals(setOf(), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
    }

    @Test
    fun `deschedule non existing schedule`() {
        val cronExpression = "0/5 * * * *"
        val jobRunner = MockJobRunner()
        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
        val mockScheduledJob = MockScheduledJob(
            "mockScheduledJob-id",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())

        assertTrue(jobScheduler.schedule(mockScheduledJob))
        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")

        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")

        // Descheduling an unknown job id is a no-op that still reports success.
        assertTrue(jobScheduler.deschedule("mockScheduledJob-invalid"), "Descheduling should be true.")
        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
    }

    @Test
    fun `schedule multiple jobs`() {
        val cronExpression = "0/5 * * * *"
        val mockScheduledJob1 = MockScheduledJob(
            "mockScheduledJob-1",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())
        val mockScheduledJob2 = MockScheduledJob(
            "mockScheduledJob-2",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())

        assertTrue(jobScheduler.schedule(mockScheduledJob1, mockScheduledJob2).isEmpty())
    }

    @Test
    fun `schedule null enabled time job`() {
        val cronExpression = "0/5 * * * *"
        val mockScheduledJob2 = MockScheduledJob("mockScheduledJob-2", 1L, "mockScheduledJob-name", "MockScheduledJob", true,
            CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), null)

        assertFalse(jobScheduler.schedule(mockScheduledJob2))
    }

    @Test
    fun `schedule disabled job`() {
        val cronExpression = "0/5 * * * *"
        val mockScheduledJob1 = MockScheduledJob("mockScheduledJob-1", 1L, "mockScheduledJob-name", "MockScheduledJob", false,
            CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), Instant.now())

        assertFalse(jobScheduler.schedule(mockScheduledJob1))
    }

    @Test
    fun `run Job`() {
        val cronExpression = "0/5 * * * *"
        val mockScheduledJob = MockScheduledJob(
            "mockScheduledJob-id",
            1L,
            "mockScheduledJob-name",
            "MockScheduledJob",
            true,
            CronSchedule(cronExpression, ZoneId.of("UTC")),
            Instant.now(),
            Instant.now())

        jobRunner.runJob(mockScheduledJob, Instant.now(), Instant.now())
    }
}
b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/MockJobRunner.kt new file mode 100644 index 00000000..d490f9be --- /dev/null +++ b/core/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/core/schedule/MockJobRunner.kt @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.core.schedule + +import com.amazon.opendistroforelasticsearch.alerting.core.JobRunner +import com.amazon.opendistroforelasticsearch.alerting.core.model.ScheduledJob +import java.time.Instant + +class MockJobRunner : JobRunner { + var numberOfRun: Int = 0 + private set + var numberOfIndex: Int = 0 + private set + var numberOfDelete: Int = 0 + private set + + override fun postDelete(jobId: String) { + numberOfDelete++ + } + + override fun postIndex(job: ScheduledJob) { + numberOfIndex++ + } + + override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) { + numberOfRun++ + } +} diff --git a/elastic-api/build.gradle b/elastic-api/build.gradle new file mode 100644 index 00000000..dbc33a4e --- /dev/null +++ b/elastic-api/build.gradle @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +apply plugin: 'java' +apply plugin: 'org.jetbrains.kotlin.jvm' + +// Elastic API version specific code for both ES server api and ES test framework API. +// Add a dependency on project(':elastic-api') to compile configuration and a dependency on +// project(path: ':elastic-api', configuration: 'testOutput') to the testCompile configuration. +def versionDir = 'es65' +assert versionDir != null : "Can't find elastic-api adapter for ES version $es_mv" +sourceSets { + main { + java.srcDir "$versionDir/main/java/" + kotlin.srcDir "$versionDir/main/kotlin" + resources.srcDir "$versionDir/main/resources" + } + test { + java.srcDir "$versionDir/test/java" + kotlin.srcDir "$versionDir/test/kotlin" + resources.srcDir "$versionDir/test/resources" + } +} + +configurations { + testOutput.extendsFrom testCompile +} + +task testJar(type: Jar) { + from sourceSets.test.output + classifier = 'test' +} + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${es_version}" + compile "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" + + testCompile "org.elasticsearch.test:framework:${es_version}" +} + +artifacts { + testOutput testJar +} diff --git a/elastic-api/es65/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI65.kt b/elastic-api/es65/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI65.kt new file mode 100644 index 00000000..e1d7c1d3 --- /dev/null +++ b/elastic-api/es65/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI65.kt @@ -0,0 +1,67 @@ + +/* + * Copyright 2019 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.elasticapi + +import org.apache.logging.log4j.LogManager +import org.apache.logging.log4j.Logger +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest +import org.elasticsearch.common.bytes.BytesReference +import org.elasticsearch.common.logging.Loggers +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentHelper +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.index.engine.Engine +import org.elasticsearch.index.shard.ShardId +import org.elasticsearch.search.builder.SearchSourceBuilder +import java.io.InputStream +import java.time.Instant + +class ElasticAPI65 : ElasticAPI() { + override fun getLogger(clazz: Class<*>, settings: Settings): Logger = LogManager.getLogger(clazz) + + override fun getLogger(clazz: Class<*>, settings: Settings, shardId: ShardId): Logger = + Loggers.getLogger(clazz, shardId) + + override fun jsonParser(xcr: NamedXContentRegistry, source: String): XContentParser = + XContentType.JSON.xContent().createParser(xcr, 
LoggingDeprecationHandler.INSTANCE, source) + + override fun parseSearchSource(xcp: XContentParser): SearchSourceBuilder = + SearchSourceBuilder.fromXContent(xcp, false) + + override fun jsonParser(xcr: NamedXContentRegistry, bytesRef: BytesReference) = + XContentHelper.createParser(xcr, LoggingDeprecationHandler.INSTANCE, bytesRef, XContentType.JSON) + + override fun jsonParser(xcr: NamedXContentRegistry, istr: InputStream): XContentParser = + XContentType.JSON.xContent().createParser(xcr, LoggingDeprecationHandler.INSTANCE, istr) + + override fun createParser(xcr: NamedXContentRegistry, bytesRef: BytesReference, xContentType: XContentType): XContentParser = + xContentType.xContent().createParser(xcr, LoggingDeprecationHandler.INSTANCE, bytesRef.streamInput()) + + override fun hasWriteFailed(result: Engine.Result): Boolean = result.resultType != Engine.Result.Type.SUCCESS + + override fun timeField(xcb: XContentBuilder, fieldName: String, value: Instant): XContentBuilder = + xcb.timeField(fieldName, fieldName, value.toEpochMilli()) + + override fun builderToBytesRef(xcb: XContentBuilder): BytesReference = BytesReference.bytes(xcb) + + override fun getCreateIndexRequest(rr: RolloverRequest): CreateIndexRequest = rr.createIndexRequest +} diff --git a/elastic-api/es65/main/resources/META-INF/services/com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI b/elastic-api/es65/main/resources/META-INF/services/com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI new file mode 100644 index 00000000..00060347 --- /dev/null +++ b/elastic-api/es65/main/resources/META-INF/services/com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI @@ -0,0 +1,16 @@ +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +com.amazon.opendistroforelasticsearch.alerting.elasticapi.ElasticAPI65 diff --git a/elastic-api/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI.kt b/elastic-api/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI.kt new file mode 100644 index 00000000..dd72e831 --- /dev/null +++ b/elastic-api/src/main/kotlin/com/amazon/opendistroforelasticsearch/alerting/elasticapi/ElasticAPI.kt @@ -0,0 +1,103 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.elasticapi + +import org.apache.logging.log4j.Logger +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest +import org.elasticsearch.common.bytes.BytesReference +import org.elasticsearch.common.settings.Settings +import org.elasticsearch.common.xcontent.NamedXContentRegistry +import org.elasticsearch.common.xcontent.XContent +import org.elasticsearch.common.xcontent.XContentBuilder +import org.elasticsearch.common.xcontent.XContentParser +import org.elasticsearch.common.xcontent.XContentType +import org.elasticsearch.index.engine.Engine +import org.elasticsearch.index.shard.ShardId +import org.elasticsearch.search.builder.SearchSourceBuilder +import java.io.InputStream +import java.time.Instant +import java.util.ServiceLoader + +/** + * Wrapper to abstract away changes in plugin API between elastic versions. The actual implementation lives in the ES version + * specific module (e.g. es65) and is loaded via a [ServiceLoader] at runtime. 
+ */ +abstract class ElasticAPI { + + companion object { + @JvmStatic val INSTANCE: ElasticAPI by lazy { + val loader = ServiceLoader.load(ElasticAPI::class.java, ElasticAPI::class.java.classLoader) + loader.first() // There must always be an instance on the classpath + } + } + + /** + * Function moved from [ServerLoggers] in ES 6.2 to [Loggers] in ES 6.3 + */ + abstract fun getLogger(clazz: Class<*>, settings: Settings): Logger + + /** + * Function moved from [ServerLoggers] in ES 6.2 to [Loggers] in ES 6.3 + */ + abstract fun getLogger(clazz: Class<*>, settings: Settings, shardId: ShardId): Logger + + /** + * [XContent.createParser] takes a [DeprecationHandler] param in ES 6.3 + */ + abstract fun jsonParser(xcr: NamedXContentRegistry, source: String): XContentParser + + /** + * [XContent.createParser] takes a [DeprecationHandler] param in ES 6.3 + */ + abstract fun jsonParser(xcr: NamedXContentRegistry, bytesRef: BytesReference): XContentParser + + /** + * [XContent.createParser] takes a [DeprecationHandler] param in ES 6.3 + */ + abstract fun jsonParser(xcr: NamedXContentRegistry, istr: InputStream): XContentParser + + /** + * [XContent.createParser] takes a [DeprecationHandler] param in ES 6.3 + */ + abstract fun createParser(xcr: NamedXContentRegistry, bytesRef: BytesReference, xContentType: XContentType): XContentParser + + /** + * [Engine.Result] has a ResultType enum in ES 6.3 but not in 6.2 + */ + abstract fun hasWriteFailed(result: Engine.Result): Boolean + + /** + * [XContentBuilder.dateField] method in ES 6.2 renamed to [XContentBuilder.timeField] in ES 6.3 + */ + abstract fun timeField(xcb: XContentBuilder, fieldName: String, value: Instant): XContentBuilder + + /** + * [XContentBuilder.toBytes] method in ES 6.2 moved to [BytesReference.bytes] in ES 6.3 + */ + abstract fun builderToBytesRef(xcb: XContentBuilder): BytesReference + + /** + * [RolloverRequest.getCreateIndexRequest] is public in ES 6.3 but not in 6.2 + */ + abstract fun 
getCreateIndexRequest(rr: RolloverRequest): CreateIndexRequest + + /** + * Elastic consumes trailing tokens at the end of a search source by default in ES >= 6.3. + * See [ES migration doc](https://github.com/elastic/elasticsearch/blob/6.3/docs/reference/migration/migrate_6_0/search.asciidoc#invalid-_search-request-body) + */ + abstract fun parseSearchSource(xcp: XContentParser): SearchSourceBuilder +} diff --git a/elastic-api/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/test/ElasticTestAPI.kt b/elastic-api/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/test/ElasticTestAPI.kt new file mode 100644 index 00000000..a75f31ae --- /dev/null +++ b/elastic-api/src/test/kotlin/com/amazon/opendistroforelasticsearch/alerting/test/ElasticTestAPI.kt @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.test + +import org.apache.http.Header +import org.apache.http.HttpEntity +import org.elasticsearch.client.Response +import org.elasticsearch.client.RestClient + +/** + * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides + * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely + * but that's an exercise for another day. 
+ */ +@Suppress("DEPRECATION") +fun RestClient.makeRequest( + method: String, + endpoint: String, + params: Map = emptyMap(), + entity: HttpEntity? = null, + vararg headers: Header +): Response { + return if (entity != null) { + performRequest(method, endpoint, params, entity, *headers) + } else { + performRequest(method, endpoint, params, *headers) + } +} + +/** + * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides + * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely + * but that's an exercise for another day. + */ +@Suppress("DEPRECATION") +fun RestClient.makeRequest( + method: String, + endpoint: String, + entity: HttpEntity? = null, + vararg headers: Header +): Response { + return if (entity != null) { + performRequest(method, endpoint, emptyMap(), entity, *headers) + } else { + performRequest(method, endpoint, emptyMap(), *headers) + } +} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..1948b9074f1016d15d505d185bc3f73deb82d8c8 GIT binary patch literal 54413 zcmafaV|Zr4wq`oEZQHiZj%|LijZQlLf{tz5M#r{o+fI6V=G-$g=gzrzeyqLskF}nv zRZs0&c;EUi2L_G~0s;*U0szbMMwKS>Gw zRZ#mYf6f1oqJoH`jHHCB8l!^by~4z}yc`4LEP@;Z?bO6{g9`Hk+s@(L1jC5Tq{1Yf z4E;CQvrx0-gF+peRxFC*gF=&$zNYjO?HlJ?=WqXMz`tYs@0o%B{dRD+{C_6(f9t^g zhmNJQv6-#;f2)f2uc{u-#*U8W&i{|ewYN^n_1~cv|1J!}zc&$eaBy{T{cEpa46s*q zHFkD2cV;xTHFj}{*3kBt*FgS4A5SI|$F%$gB@It9FlC}D3y`sbZG{2P6gGwC$U`6O zb_cId9AhQl#A<&=x>-xDD%=Ppt$;y71@Lwsl{x943#T@8*?cbR<~d`@@}4V${+r$jICUIOzgZJy_9I zu*eA(F)$~J07zX%tmQN}1^wj+RM|9bbwhQA=xrPE*{vB_P!pPYT5{Or^m*;Qz#@Bl zRywCG_RDyM6bf~=xn}FtiFAw|rrUxa1+z^H`j6e|GwKDuq}P)z&@J>MEhsVBvnF|O zOEm)dADU1wi8~mX(j_8`DwMT_OUAnjbWYer;P*^Uku_qMu3}qJU zTAkza-K9aj&wcsGuhQ>RQoD?gz~L8RwCHOZDzhBD$az*$TQ3!uygnx_rsXG`#_x5t zn*lb(%JI3%G^MpYp-Y(KI4@_!&kBRa3q z|Fzn&3R%ZsoMNEn4pN3-BSw2S_{IB8RzRv(eQ1X 
zyBQZHJ<(~PfUZ~EoI!Aj`9k<+Cy z2DtI<+9sXQu!6&-Sk4SW3oz}?Q~mFvy(urUy<)x!KQ>#7yIPC)(ORhKl7k)4eSy~} z7#H3KG<|lt68$tk^`=yjev%^usOfpQ#+Tqyx|b#dVA(>fPlGuS@9ydo z!Cs#hse9nUETfGX-7lg;F>9)+ml@M8OO^q|W~NiysX2N|2dH>qj%NM`=*d3GvES_# zyLEHw&1Fx<-dYxCQbk_wk^CI?W44%Q9!!9aJKZW-bGVhK?N;q`+Cgc*WqyXcxZ%U5QXKu!Xn)u_dxeQ z;uw9Vysk!3OFzUmVoe)qt3ifPin0h25TU zrG*03L~0|aaBg7^YPEW^Yq3>mSNQgk-o^CEH?wXZ^QiPiuH}jGk;75PUMNquJjm$3 zLcXN*uDRf$Jukqg3;046b;3s8zkxa_6yAlG{+7{81O3w96i_A$KcJhD&+oz1<>?lun#C3+X0q zO4JxN{qZ!e#FCl@e_3G?0I^$CX6e$cy7$BL#4<`AA)Lw+k`^15pmb-447~5lkSMZ` z>Ce|adKhb-F%yy!vx>yQbXFgHyl(an=x^zi(!-~|k;G1=E(e@JgqbAF{;nv`3i)oi zDeT*Q+Mp{+NkURoabYb9@#Bi5FMQnBFEU?H{~9c;g3K%m{+^hNe}(MdpPb?j9`?2l z#%AO!|2QxGq7-2Jn2|%atvGb(+?j&lmP509i5y87`9*BSY++<%%DXb)kaqG0(4Eft zj|2!Od~2TfVTi^0dazAIeVe&b#{J4DjN6;4W;M{yWj7#+oLhJyqeRaO;>?%mX>Ec{Mp~;`bo}p;`)@5dA8fNQ38FyMf;wUPOdZS{U*8SN6xa z-kq3>*Zos!2`FMA7qjhw-`^3ci%c91Lh`;h{qX1r;x1}eW2hYaE*3lTk4GwenoxQ1kHt1Lw!*N8Z%DdZSGg5~Bw}+L!1#d$u+S=Bzo7gi zqGsBV29i)Jw(vix>De)H&PC; z-t2OX_ak#~eSJ?Xq=q9A#0oaP*dO7*MqV;dJv|aUG00UX=cIhdaet|YEIhv6AUuyM zH1h7fK9-AV)k8sr#POIhl+?Z^r?wI^GE)ZI=H!WR<|UI(3_YUaD#TYV$Fxd015^mT zpy&#-IK>ahfBlJm-J(n(A%cKV;)8&Y{P!E|AHPtRHk=XqvYUX?+9po4B$0-6t74UUef${01V{QLEE8gzw* z5nFnvJ|T4dlRiW9;Ed_yB{R@)fC=zo4hCtD?TPW*WJmMXYxN_&@YQYg zBQ$XRHa&EE;YJrS{bn7q?}Y&DH*h;){5MmE(9A6aSU|W?{3Ox%5fHLFScv7O-txuRbPG1KQtI`Oay=IcEG=+hPhlnYC;`wSHeo|XGio0aTS6&W($E$ z?N&?TK*l8;Y^-xPl-WVZwrfdiQv10KdsAb9u-*1co*0-Z(h#H)k{Vc5CT!708cs%sExvPC+7-^UY~jTfFq=cj z!Dmy<+NtKp&}}$}rD{l?%MwHdpE(cPCd;-QFPk1`E5EVNY2i6E`;^aBlx4}h*l42z zpY#2cYzC1l6EDrOY*ccb%kP;k8LHE3tP>l3iK?XZ%FI<3666yPw1rM%>eCgnv^JS_ zK7c~;g7yXt9fz@(49}Dj7VO%+P!eEm& z;z8UXs%NsQ%@2S5nve)@;yT^61BpVlc}=+i6{ZZ9r7<({yUYqe==9*Z+HguP3`sA& z{`inI4G)eLieUQ*pH9M@)u7yVnWTQva;|xq&-B<>MoP(|xP(HqeCk1&h>DHNLT>Zi zQ$uH%s6GoPAi0~)sC;`;ngsk+StYL9NFzhFEoT&Hzfma1f|tEnL0 zMWdX4(@Y*?*tM2@H<#^_l}BC&;PYJl%~E#veQ61{wG6!~nyop<^e)scV5#VkGjYc2 z$u)AW-NmMm%T7WschOnQ!Hbbw&?`oMZrJ&%dVlN3VNra1d0TKfbOz{dHfrCmJ2Jj= zS#Gr}JQcVD?S9X!u|oQ7LZ+qcq{$40 
ziG5=X^+WqeqxU00YuftU7o;db=K+Tq!y^daCZgQ)O=M} zK>j*<3oxs=Rcr&W2h%w?0Cn3);~vqG>JO_tTOzuom^g&^vzlEjkx>Sv!@NNX%_C!v zaMpB>%yVb}&ND9b*O>?HxQ$5-%@xMGe4XKjWh7X>CYoRI2^JIwi&3Q5UM)?G^k8;8 zmY$u;(KjZx>vb3fe2zgD7V;T2_|1KZQW$Yq%y5Ioxmna9#xktcgVitv7Sb3SlLd6D zfmBM9Vs4rt1s0M}c_&%iP5O{Dnyp|g1(cLYz^qLqTfN6`+o}59Zlu%~oR3Q3?{Bnr zkx+wTpeag^G12fb_%SghFcl|p2~<)Av?Agumf@v7y-)ecVs`US=q~=QG%(_RTsqQi z%B&JdbOBOmoywgDW|DKR5>l$1^FPhxsBrja<&}*pfvE|5dQ7j-wV|ur%QUCRCzBR3q*X`05O3U@?#$<>@e+Zh&Z&`KfuM!0XL& zI$gc@ZpM4o>d&5)mg7+-Mmp98K^b*28(|Ew8kW}XEV7k^vnX-$onm9OtaO@NU9a|as7iA%5Wrw9*%UtJYacltplA5}gx^YQM` zVkn`TIw~avq)mIQO0F0xg)w$c)=8~6Jl|gdqnO6<5XD)&e7z7ypd3HOIR+ss0ikSVrWar?548HFQ*+hC)NPCq*;cG#B$7 z!n?{e9`&Nh-y}v=nK&PR>PFdut*q&i81Id`Z<0vXUPEbbJ|<~_D!)DJMqSF~ly$tN zygoa)um~xdYT<7%%m!K8+V(&%83{758b0}`b&=`))Tuv_)OL6pf=XOdFk&Mfx9y{! z6nL>V?t=#eFfM$GgGT8DgbGRCF@0ZcWaNs_#yl+6&sK~(JFwJmN-aHX{#Xkpmg;!} zgNyYYrtZdLzW1tN#QZAh!z5>h|At3m+ryJ-DFl%V>w?cmVTxt^DsCi1ZwPaCe*D{) z?#AZV6Debz{*D#C2>44Czy^yT3y92AYDcIXtZrK{L-XacVl$4i=X2|K=Fy5vAzhk{ zu3qG=qSb_YYh^HirWf~n!_Hn;TwV8FU9H8+=BO)XVFV`nt)b>5yACVr!b98QlLOBDY=^KS<*m9@_h3;64VhBQzb_QI)gbM zSDto2i*iFrvxSmAIrePB3i`Ib>LdM8wXq8(R{-)P6DjUi{2;?}9S7l7bND4w%L2!; zUh~sJ(?Yp}o!q6)2CwG*mgUUWlZ;xJZo`U`tiqa)H4j>QVC_dE7ha0)nP5mWGB268 zn~MVG<#fP#R%F=Ic@(&Va4dMk$ysM$^Avr1&hS!p=-7F>UMzd(M^N9Ijb|364}qcj zcIIh7suk$fQE3?Z^W4XKIPh~|+3(@{8*dSo&+Kr(J4^VtC{z*_{2}ld<`+mDE2)S| zQ}G#Q0@ffZCw!%ZGc@kNoMIdQ?1db%N1O0{IPPesUHI;(h8I}ETudk5ESK#boZgln z(0kvE`&6z1xH!s&={%wQe;{^&5e@N0s7IqR?L*x%iXM_czI5R1aU?!bA7)#c4UN2u zc_LZU+@elD5iZ=4*X&8%7~mA;SA$SJ-8q^tL6y)d150iM)!-ry@TI<=cnS#$kJAS# zq%eK**T*Wi2OlJ#w+d_}4=VN^A%1O+{?`BK00wkm)g8;u?vM;RR+F1G?}({ENT3i= zQsjJkp-dmJ&3-jMNo)wrz0!g*1z!V7D(StmL(A}gr^H-CZ~G9u?*Uhcx|x7rb`v^X z9~QGx;wdF4VcxCmEBp$F#sms@MR?CF67)rlpMxvwhEZLgp2?wQq|ci#rLtrYRV~iR zN?UrkDDTu114&d~Utjcyh#tXE_1x%!dY?G>qb81pWWH)Ku@Kxbnq0=zL#x@sCB(gs zm}COI(!{6-XO5li0>1n}Wz?w7AT-Sp+=NQ1aV@fM$`PGZjs*L+H^EW&s!XafStI!S zzgdntht=*p#R*o8-ZiSb5zf6z?TZr$^BtmIfGAGK;cdg=EyEG)fc*E<*T=#a?l=R5 
zv#J;6C(umoSfc)W*EODW4z6czg3tXIm?x8{+8i^b;$|w~k)KLhJQnNW7kWXcR^sol z1GYOp?)a+}9Dg*nJ4fy*_riThdkbHO37^csfZRGN;CvQOtRacu6uoh^gg%_oEZKDd z?X_k67s$`|Q&huidfEonytrq!wOg07H&z@`&BU6D114p!rtT2|iukF}>k?71-3Hk< zs6yvmsMRO%KBQ44X4_FEYW~$yx@Y9tKrQ|rC1%W$6w}-9!2%4Zk%NycTzCB=nb)r6*92_Dg+c0;a%l1 zsJ$X)iyYR2iSh|%pIzYV1OUWER&np{w1+RXb~ zMUMRymjAw*{M)UtbT)T!kq5ZAn%n=gq3ssk3mYViE^$paZ;c^7{vXDJ`)q<}QKd2?{r9`X3mpZ{AW^UaRe2^wWxIZ$tuyKzp#!X-hXkHwfD zj@2tA--vFi3o_6B?|I%uwD~emwn0a z+?2Lc1xs(`H{Xu>IHXpz=@-84uw%dNV;{|c&ub|nFz(=W-t4|MME(dE4tZQi?0CE|4_?O_dyZj1)r zBcqB8I^Lt*#)ABdw#yq{OtNgf240Jvjm8^zdSf40 z;H)cp*rj>WhGSy|RC5A@mwnmQ`y4{O*SJ&S@UFbvLWyPdh)QnM=(+m3p;0&$^ysbZ zJt!ZkNQ%3hOY*sF2_~-*`aP|3Jq7_<18PX*MEUH*)t{eIx%#ibC|d&^L5FwoBN}Oe z?!)9RS@Zz%X1mqpHgym75{_BM4g)k1!L{$r4(2kL<#Oh$Ei7koqoccI3(MN1+6cDJ zp=xQhmilz1?+ZjkX%kfn4{_6K_D{wb~rdbkh!!k!Z@cE z^&jz55*QtsuNSlGPrU=R?}{*_8?4L7(+?>?(^3Ss)f!ou&{6<9QgH>#2$?-HfmDPN z6oIJ$lRbDZb)h-fFEm^1-v?Slb8udG{7GhbaGD_JJ8a9f{6{TqQN;m@$&)t81k77A z?{{)61za|e2GEq2)-OqcEjP`fhIlUs_Es-dfgX-3{S08g`w=wGj2{?`k^GD8d$}6Z zBT0T1lNw~fuwjO5BurKM593NGYGWAK%UCYiq{$p^GoYz^Uq0$YQ$j5CBXyog8(p_E znTC+$D`*^PFNc3Ih3b!2Lu|OOH6@46D)bbvaZHy%-9=$cz}V^|VPBpmPB6Ivzlu&c zPq6s7(2c4=1M;xlr}bkSmo9P`DAF>?Y*K%VPsY`cVZ{mN&0I=jagJ?GA!I;R)i&@{ z0Gl^%TLf_N`)`WKs?zlWolWvEM_?{vVyo(!taG$`FH2bqB`(o50pA=W34kl-qI62lt z1~4LG_j%sR2tBFteI{&mOTRVU7AH>>-4ZCD_p6;-J<=qrod`YFBwJz(Siu(`S}&}1 z6&OVJS@(O!=HKr-Xyzuhi;swJYK*ums~y1ePdX#~*04=b9)UqHHg;*XJOxnS6XK#j zG|O$>^2eW2ZVczP8#$C`EpcWwPFX4^}$omn{;P(fL z>J~%-r5}*D3$Kii z34r@JmMW2XEa~UV{bYP=F;Y5=9miJ+Jw6tjkR+cUD5+5TuKI`mSnEaYE2=usXNBs9 zac}V13%|q&Yg6**?H9D620qj62dM+&&1&a{NjF}JqmIP1I1RGppZ|oIfR}l1>itC% zl>ed${{_}8^}m2^br*AIX$L!Vc?Sm@H^=|LnpJg`a7EC+B;)j#9#tx-o0_e4!F5-4 zF4gA;#>*qrpow9W%tBzQ89U6hZ9g=-$gQpCh6Nv_I0X7t=th2ajJ8dBbh{i)Ok4{I z`Gacpl?N$LjC$tp&}7Sm(?A;;Nb0>rAWPN~@3sZ~0_j5bR+dz;Qs|R|k%LdreS3Nn zp*36^t#&ASm=jT)PIjNqaSe4mTjAzlAFr*@nQ~F+Xdh$VjHWZMKaI+s#FF#zjx)BJ zufxkW_JQcPcHa9PviuAu$lhwPR{R{7CzMUi49=MaOA%ElpK;A)6Sgsl7lw)D$8FwE zi(O6g;m*86kcJQ{KIT-Rv&cbv_SY4 
zpm1|lSL*o_1LGOlBK0KuU2?vWcEcQ6f4;&K=&?|f`~X+s8H)se?|~2HcJo{M?Ity) zE9U!EKGz2^NgB6Ud;?GcV*1xC^1RYIp&0fr;DrqWLi_Kts()-#&3|wz{wFQsKfnnsC||T?oIgUp z{O(?Df7&vW!i#_~*@naguLLjDAz+)~*_xV2iz2?(N|0y8DMneikrT*dG`mu6vdK`% z=&nX5{F-V!Reau}+w_V3)4?}h@A@O)6GCY7eXC{p-5~p8x{cH=hNR;Sb{*XloSZ_%0ZKYG=w<|!vy?spR4!6mF!sXMUB5S9o_lh^g0!=2m55hGR; z-&*BZ*&;YSo474=SAM!WzrvjmNtq17L`kxbrZ8RN419e=5CiQ-bP1j-C#@@-&5*(8 zRQdU~+e(teUf}I3tu%PB1@Tr{r=?@0KOi3+Dy8}+y#bvgeY(FdN!!`Kb>-nM;7u=6 z;0yBwOJ6OdWn0gnuM{0`*fd=C(f8ASnH5aNYJjpbY1apTAY$-%)uDi$%2)lpH=#)=HH z<9JaYwPKil@QbfGOWvJ?cN6RPBr`f+jBC|-dO|W@x_Vv~)bmY(U(!cs6cnhe0z31O z>yTtL4@KJ*ac85u9|=LFST22~!lb>n7IeHs)_(P_gU}|8G>{D_fJX)8BJ;Se? z67QTTlTzZykb^4!{xF!=C}VeFd@n!9E)JAK4|vWVwWop5vSWcD<;2!88v-lS&ve7C zuYRH^85#hGKX(Mrk};f$j_V&`Nb}MZy1mmfz(e`nnI4Vpq(R}26pZx?fq%^|(n~>* z5a5OFtFJJfrZmgjyHbj1`9||Yp?~`p2?4NCwu_!!*4w8K`&G7U_|np&g7oY*-i;sI zu)~kYH;FddS{7Ri#Z5)U&X3h1$Mj{{yk1Q6bh4!7!)r&rqO6K~{afz@bis?*a56i& zxi#(Ss6tkU5hDQJ0{4sKfM*ah0f$>WvuRL zunQ-eOqa3&(rv4kiQ(N4`FO6w+nko_HggKFWx@5aYr}<~8wuEbD(Icvyl~9QL^MBt zSvD)*C#{2}!Z55k1ukV$kcJLtW2d~%z$t0qMe(%2qG`iF9K_Gsae7OO%Tf8E>ooch ztAw01`WVv6?*14e1w%Wovtj7jz_)4bGAqqo zvTD|B4)Ls8x7-yr6%tYp)A7|A)x{WcI&|&DTQR&2ir(KGR7~_RhNOft)wS<+vQ*|sf;d>s zEfl&B^*ZJp$|N`w**cXOza8(ARhJT{O3np#OlfxP9Nnle4Sto)Fv{w6ifKIN^f1qO*m8+MOgA1^Du!=(@MAh8)@wU8t=Ymh!iuT_lzfm za~xEazL-0xwy9$48!+?^lBwMV{!Gx)N>}CDi?Jwax^YX@_bxl*+4itP;DrTswv~n{ zZ0P>@EB({J9ZJ(^|ptn4ks^Z2UI&87d~J_^z0&vD2yb%*H^AE!w= zm&FiH*c%vvm{v&i3S>_hacFH${|(2+q!`X~zn4$aJDAry>=n|{C7le(0a)nyV{kAD zlud4-6X>1@-XZd`3SKKHm*XNn_zCyKHmf*`C_O509$iy$Wj`Sm3y?nWLCDy>MUx1x zl-sz7^{m(&NUk*%_0(G^>wLDnXW90FzNi$Tu6* z<+{ePBD`%IByu977rI^x;gO5M)Tfa-l*A2mU-#IL2?+NXK-?np<&2rlF;5kaGGrx2 zy8Xrz`kHtTVlSSlC=nlV4_oCsbwyVHG4@Adb6RWzd|Otr!LU=% zEjM5sZ#Ib4#jF(l!)8Na%$5VK#tzS>=05GpV?&o* z3goH1co0YR=)98rPJ~PuHvkA59KUi#i(Mq_$rApn1o&n1mUuZfFLjx@3;h`0^|S##QiTP8rD`r8P+#D@gvDJh>amMIl065I)PxT6Hg(lJ?X7*|XF2Le zv36p8dWHCo)f#C&(|@i1RAag->5ch8TY!LJ3(+KBmLxyMA%8*X%_ARR*!$AL66nF= 
z=D}uH)D)dKGZ5AG)8N-;Il*-QJ&d8u30&$_Q0n1B58S0ykyDAyGa+BZ>FkiOHm1*& zNOVH;#>Hg5p?3f(7#q*dL74;$4!t?a#6cfy#}9H3IFGiCmevir5@zXQj6~)@zYrWZ zRl*e66rjwksx-)Flr|Kzd#Bg>We+a&E{h7bKSae9P~ z(g|zuXmZ zD?R*MlmoZ##+0c|cJ(O{*h(JtRdA#lChYhfsx25(Z`@AK?Q-S8_PQqk z>|Z@Ki1=wL1_c6giS%E4YVYD|Y-{^ZzFwB*yN8-4#+TxeQ`jhks7|SBu7X|g=!_XL z`mY=0^chZfXm%2DYHJ4z#soO7=NONxn^K3WX={dV>$CTWSZe@<81-8DVtJEw#Uhd3 zxZx+($6%4a&y_rD8a&E`4$pD6-_zZJ%LEE*1|!9uOm!kYXW< zOBXZAowsX-&$5C`xgWkC43GcnY)UQt2Qkib4!!8Mh-Q!_M%5{EC=Gim@_;0+lP%O^ zG~Q$QmatQk{Mu&l{q~#kOD;T-{b1P5u7)o-QPPnqi?7~5?7%IIFKdj{;3~Hu#iS|j z)Zoo2wjf%+rRj?vzWz(6JU`=7H}WxLF*|?WE)ci7aK?SCmd}pMW<{#1Z!_7BmVP{w zSrG>?t}yNyCR%ZFP?;}e8_ zRy67~&u11TN4UlopWGj6IokS{vB!v!n~TJYD6k?~XQkpiPMUGLG2j;lh>Eb5bLTkX zx>CZlXdoJsiPx=E48a4Fkla>8dZYB%^;Xkd(BZK$z3J&@({A`aspC6$qnK`BWL;*O z-nRF{XRS`3Y&b+}G&|pE1K-Ll_NpT!%4@7~l=-TtYRW0JJ!s2C-_UsRBQ=v@VQ+4> z*6jF0;R@5XLHO^&PFyaMDvyo?-lAD(@H61l-No#t@at@Le9xOgTFqkc%07KL^&iss z!S2Ghm)u#26D(e1Q7E;L`rxOy-N{kJ zTgfw}az9=9Su?NEMMtpRlYwDxUAUr8F+P=+9pkX4%iA4&&D<|=B|~s*-U+q6cq`y* zIE+;2rD7&D5X;VAv=5rC5&nP$E9Z3HKTqIFCEV%V;b)Y|dY?8ySn|FD?s3IO>VZ&&f)idp_7AGnwVd1Z znBUOBA}~wogNpEWTt^1Rm-(YLftB=SU|#o&pT7vTr`bQo;=ZqJHIj2MP{JuXQPV7% z0k$5Ha6##aGly<}u>d&d{Hkpu?ZQeL_*M%A8IaXq2SQl35yW9zs4^CZheVgHF`%r= zs(Z|N!gU5gj-B^5{*sF>;~fauKVTq-Ml2>t>E0xl9wywD&nVYZfs1F9Lq}(clpNLz z4O(gm_i}!k`wUoKr|H#j#@XOXQ<#eDGJ=eRJjhOUtiKOG;hym-1Hu)1JYj+Kl*To<8( za1Kf4_Y@Cy>eoC59HZ4o&xY@!G(2p^=wTCV>?rQE`Upo^pbhWdM$WP4HFdDy$HiZ~ zRUJFWTII{J$GLVWR?miDjowFk<1#foE3}C2AKTNFku+BhLUuT>?PATB?WVLzEYyu+ zM*x((pGdotzLJ{}R=OD*jUexKi`mb1MaN0Hr(Wk8-Uj0zA;^1w2rmxLI$qq68D>^$ zj@)~T1l@K|~@YJ6+@1vlWl zHg5g%F{@fW5K!u>4LX8W;ua(t6YCCO_oNu}IIvI6>Fo@MilYuwUR?9p)rKNzDmTAN zzN2d>=Za&?Z!rJFV*;mJ&-sBV80%<-HN1;ciLb*Jk^p?u<~T25%7jjFnorfr={+wm zzl5Q6O>tsN8q*?>uSU6#xG}FpAVEQ_++@}G$?;S7owlK~@trhc#C)TeIYj^N(R&a} zypm~c=fIs;M!YQrL}5{xl=tUU-Tfc0ZfhQuA-u5(*w5RXg!2kChQRd$Fa8xQ0CQIU zC`cZ*!!|O!*y1k1J^m8IIi|Sl3R}gm@CC&;4840^9_bb9%&IZTRk#=^H0w%`5pMDCUef5 
zYt-KpWp2ijh+FM`!zZ35>+7eLN;s3*P!bp%-oSx34fdTZ14Tsf2v7ZrP+mitUx$rS zW(sOi^CFxe$g3$x45snQwPV5wpf}>5OB?}&Gh<~i(mU&ss#7;utaLZ!|KaTHniGO9 zVC9OTzuMKz)afey_{93x5S*Hfp$+r*W>O^$2ng|ik!<`U1pkxm3*)PH*d#>7md1y} zs7u^a8zW8bvl92iN;*hfOc-=P7{lJeJ|3=NfX{(XRXr;*W3j845SKG&%N zuBqCtDWj*>KooINK1 zFPCsCWr!-8G}G)X*QM~34R*k zmRmDGF*QE?jCeNfc?k{w<}@29e}W|qKJ1K|AX!htt2|B`nL=HkC4?1bEaHtGBg}V( zl(A`6z*tck_F$4;kz-TNF%7?=20iqQo&ohf@S{_!TTXnVh}FaW2jxAh(DI0f*SDG- z7tqf5X@p#l?7pUNI(BGi>n_phw=lDm>2OgHx-{`T>KP2YH9Gm5ma zb{>7>`tZ>0d5K$j|s2!{^sFWQo3+xDb~#=9-jp(1ydI3_&RXGB~rxWSMgDCGQG)oNoc#>)td zqE|X->35U?_M6{^lB4l(HSN|`TC2U*-`1jSQeiXPtvVXdN-?i1?d#;pw%RfQuKJ|e zjg75M+Q4F0p@8I3ECpBhGs^kK;^0;7O@MV=sX^EJLVJf>L;GmO z3}EbTcoom7QbI(N8ad!z(!6$!MzKaajSRb0c+ZDQ($kFT&&?GvXmu7+V3^_(VJx1z zP-1kW_AB&_A;cxm*g`$ z#Pl@Cg{siF0ST2-w)zJkzi@X)5i@)Z;7M5ewX+xcY36IaE0#flASPY2WmF8St0am{ zV|P|j9wqcMi%r-TaU>(l*=HxnrN?&qAyzimA@wtf;#^%{$G7i4nXu=Pp2#r@O~wi)zB>@25A*|axl zEclXBlXx1LP3x0yrSx@s-kVW4qlF+idF+{M7RG54CgA&soDU-3SfHW@-6_ z+*;{n_SixmGCeZjHmEE!IF}!#aswth_{zm5Qhj0z-@I}pR?cu=P)HJUBClC;U+9;$#@xia30o$% zDw%BgOl>%vRenxL#|M$s^9X}diJ9q7wI1-0n2#6>@q}rK@ng(4M68(t52H_Jc{f&M9NPxRr->vj-88hoI?pvpn}llcv_r0`;uN>wuE{ z&TOx_i4==o;)>V4vCqG)A!mW>dI^Ql8BmhOy$6^>OaUAnI3>mN!Zr#qo4A>BegYj` zNG_)2Nvy2Cqxs1SF9A5HHhL7sai#Umw%K@+riaF+q)7&MUJvA&;$`(w)+B@c6!kX@ zzuY;LGu6|Q2eu^06PzSLspV2v4E?IPf`?Su_g8CX!75l)PCvyWKi4YRoRThB!-BhG zubQ#<7oCvj@z`^y&mPhSlbMf0<;0D z?5&!I?nV-jh-j1g~&R(YL@c=KB_gNup$8abPzXZN`N|WLqxlN)ZJ+#k4UWq#WqvVD z^|j+8f5uxTJtgcUscKTqKcr?5g-Ih3nmbvWvvEk})u-O}h$=-p4WE^qq7Z|rLas0$ zh0j&lhm@Rk(6ZF0_6^>Rd?Ni-#u1y`;$9tS;~!ph8T7fLlYE{P=XtWfV0Ql z#z{_;A%p|8+LhbZT0D_1!b}}MBx9`R9uM|+*`4l3^O(>Mk%@ha>VDY=nZMMb2TnJ= zGlQ+#+pmE98zuFxwAQcVkH1M887y;Bz&EJ7chIQQe!pgWX>(2ruI(emhz@_6t@k8Z zqFEyJFX2PO`$gJ6p$=ku{7!vR#u+$qo|1r;orjtp9FP^o2`2_vV;W&OT)acRXLN^m zY8a;geAxg!nbVu|uS8>@Gvf@JoL&GP`2v4s$Y^5vE32&l;2)`S%e#AnFI-YY7_>d#IKJI!oL6e z_7W3e=-0iz{bmuB*HP+D{Nb;rn+RyimTFqNV9Bzpa0?l`pWmR0yQOu&9c0S*1EPr1 zdoHMYlr>BycjTm%WeVuFd|QF8I{NPT&`fm=dITj&3(M^q 
ze2J{_2zB;wDME%}SzVWSW6)>1QtiX)Iiy^p2eT}Ii$E9w$5m)kv(3wSCNWq=#DaKZ zs%P`#^b7F-J0DgQ1?~2M`5ClYtYN{AlU|v4pEg4z03=g6nqH`JjQuM{k`!6jaIL_F zC;sn?1x?~uMo_DFg#ypNeie{3udcm~M&bYJ1LI zE%y}P9oCX3I1Y9yhF(y9Ix_=8L(p)EYr&|XZWCOb$7f2qX|A4aJ9bl7pt40Xr zXUT#NMBB8I@xoIGSHAZkYdCj>eEd#>a;W-?v4k%CwBaR5N>e3IFLRbDQTH#m_H+4b zk2UHVymC`%IqwtHUmpS1!1p-uQB`CW1Y!+VD!N4TT}D8(V0IOL|&R&)Rwj@n8g@=`h&z9YTPDT+R9agnwPuM!JW~=_ya~% zIJ*>$Fl;y7_`B7G4*P!kcy=MnNmR`(WS5_sRsvHF42NJ;EaDram5HwQ4Aw*qbYn0j;#)bh1lyKLg#dYjN*BMlh+fxmCL~?zB;HBWho;20WA==ci0mAqMfyG>1!HW zO7rOga-I9bvut1Ke_1eFo9tbzsoPTXDW1Si4}w3fq^Z|5LGf&egnw%DV=b11$F=P~ z(aV+j8S}m=CkI*8=RcrT>GmuYifP%hCoKY22Z4 zmu}o08h3YhcXx-v-QC??8mDn<+}+*X{+gZH-I;G^|7=1fBveS?J$27H&wV5^V^P$! z84?{UeYSmZ3M!@>UFoIN?GJT@IroYr;X@H~ax*CQ>b5|Xi9FXt5j`AwUPBq`0sWEJ z3O|k+g^JKMl}L(wfCqyMdRj9yS8ncE7nI14Tv#&(?}Q7oZpti{Q{Hw&5rN-&i|=fWH`XTQSu~1jx(hqm$Ibv zRzFW9$xf@oZAxL~wpj<0ZJ3rdPAE=0B>G+495QJ7D>=A&v^zXC9)2$$EnxQJ<^WlV zYKCHb1ZzzB!mBEW2WE|QG@&k?VXarY?umPPQ|kziS4{EqlIxqYHP!HN!ncw6BKQzKjqk!M&IiOJ9M^wc~ZQ1xoaI z;4je%ern~?qi&J?eD!vTl__*kd*nFF0n6mGEwI7%dI9rzCe~8vU1=nE&n4d&8}pdL zaz`QAY?6K@{s2x%Sx%#(y+t6qLw==>2(gb>AksEebXv=@ht>NBpqw=mkJR(c?l7vo z&cV)hxNoYPGqUh9KAKT)kc(NqekzE6(wjjotP(ac?`DJF=Sb7^Xet-A3PRl%n&zKk zruT9cS~vV1{%p>OVm1-miuKr<@rotj*5gd$?K`oteNibI&K?D63RoBjw)SommJ5<4 zus$!C8aCP{JHiFn2>XpX&l&jI7E7DcTjzuLYvON2{rz<)#$HNu(;ie-5$G<%eLKnTK7QXfn(UR(n+vX%aeS6!q6kv z!3nzY76-pdJp339zsl_%EI|;ic_m56({wdc(0C5LvLULW=&tWc5PW-4;&n+hm1m`f zzQV0T>OPSTjw=Ox&UF^y< zarsYKY8}YZF+~k70=olu$b$zdLaozBE|QE@H{_R21QlD5BilYBTOyv$D5DQZ8b1r- zIpSKX!SbA0Pb5#cT)L5!KpxX+x+8DRy&`o-nj+nmgV6-Gm%Fe91R1ca3`nt*hRS|^ z<&we;TJcUuPDqkM7k0S~cR%t7a`YP#80{BI$e=E!pY}am)2v3-Iqk2qvuAa1YM>xj#bh+H2V z{b#St2<;Gg>$orQ)c2a4AwD5iPcgZ7o_}7xhO86(JSJ(q(EWKTJDl|iBjGEMbX8|P z4PQHi+n(wZ_5QrX0?X_J)e_yGcTM#E#R^u_n8pK@l5416`c9S=q-e!%0RjoPyTliO zkp{OC@Ep^#Ig-n!C)K0Cy%8~**Vci8F1U(viN{==KU0nAg2(+K+GD_Gu#Bx!{tmUm zCwTrT(tCr6X8j43_n96H9%>>?4akSGMvgd+krS4wRexwZ1JxrJy!Uhz#yt$-=aq?A 
z@?*)bRZxjG9OF~7d$J0cwE_^CLceRK=LvjfH-~{S><^D;6B2&p-02?cl?|$@>`Qt$ zP*iaOxg<+(rbk>34VQDQpNQ|a9*)wScu!}<{oXC87hRPqyrNWpo?#=;1%^D2n2+C* zKKQH;?rWn-@%Y9g%NHG&lHwK9pBfV1a`!TqeU_Fv8s6_(@=RHua7`VYO|!W&WL*x= zIWE9eQaPq3zMaXuf)D0$V`RIZ74f)0P73xpeyk4)-?8j;|K%pD$eq4j2%tL=;&+E91O(2p91K|85b)GQcbRe&u6Ilu@SnE={^{Ix1Eqgv8D z4=w65+&36|;5WhBm$!n*!)ACCwT9Sip#1_z&g~E1kB=AlEhO0lu`Ls@6gw*a)lzc# zKx!fFP%eSBBs)U>xIcQKF(r_$SWD3TD@^^2Ylm=kC*tR+I@X>&SoPZdJ2fT!ysjH% z-U%|SznY8Fhsq7Vau%{Ad^Pvbf3IqVk{M2oD+w>MWimJA@VSZC$QooAO3 zC=DplXdkyl>mSp^$zk7&2+eoGQ6VVh_^E#Z3>tX7Dmi<2aqlM&YBmK&U}m>a%8)LQ z8v+c}a0QtXmyd%Kc2QNGf8TK?_EK4wtRUQ*VDnf5jHa?VvH2K(FDZOjAqYufW8oIZ z31|o~MR~T;ZS!Lz%8M0*iVARJ>_G2BXEF8(}6Dmn_rFV~5NI`lJjp`Mi~g7~P%H zO`S&-)Fngo3VXDMo7ImlaZxY^s!>2|csKca6!|m7)l^M0SQT1_L~K29%x4KV8*xiu zwP=GlyIE9YPSTC0BV`6|#)30=hJ~^aYeq7d6TNfoYUkk-^k0!(3qp(7Mo-$|48d8Z2d zrsfsRM)y$5)0G`fNq!V?qQ+nh0xwFbcp{nhW%vZ?h);=LxvM(pWd9FG$Bg1;@Bv)mKDW>AP{ol zD(R~mLzdDrBv$OSi{E%OD`Ano=F^vwc)rNb*Bg3-o)bbAgYE=M7Gj2OHY{8#pM${_^ zwkU|tnTKawxUF7vqM9UfcQ`V49zg78V%W)$#5ssR}Rj7E&p(4_ib^?9luZPJ%iJTvW&-U$nFYky>KJwHpEHHx zVEC;!ETdkCnO|${Vj#CY>LLut_+c|(hpWk8HRgMGRY%E--%oKh@{KnbQ~0GZd}{b@ z`J2qHBcqqjfHk^q=uQL!>6HSSF3LXL*cCd%opM|k#=xTShX~qcxpHTW*BI!c3`)hQq{@!7^mdUaG7sFsFYnl1%blslM;?B8Q zuifKqUAmR=>33g~#>EMNfdye#rz@IHgpM$~Z7c5@bO@S>MyFE3_F}HVNLnG0TjtXU zJeRWH^j5w_qXb$IGs+E>daTa}XPtrUnnpTRO9NEx4g6uaFEfHP9gW;xZnJi{oqAH~ z5dHS(ch3^hbvkv@u3QPLuWa}ImaElDrmIc%5HN<^bwej}3+?g) z-ai7D&6Iq_P(}k`i^4l?hRLbCb>X9iq2UYMl=`9U9Rf=3Y!gnJbr?eJqy>Zpp)m>Ae zcQ4Qfs&AaE?UDTODcEj#$_n4KeERZHx-I+E5I~E#L_T3WI3cj$5EYR75H7hy%80a8Ej?Y6hv+fR6wHN%_0$-xL!eI}fdjOK7(GdFD%`f%-qY@-i@fTAS&ETI99jUVg8 zslPSl#d4zbOcrgvopvB2c2A6r^pEr&Sa5I5%@1~BpGq`Wo|x=&)WnnQjE+)$^U-wW zr2Kv?XJby(8fcn z8JgPn)2_#-OhZ+;72R6PspMfCVvtLxFHeb7d}fo(GRjm_+R(*?9QRBr+yPF(iPO~ zA4Tp1<0}#fa{v0CU6jz}q9;!3Pew>ikG1qh$5WPRTQZ~ExQH}b1hDuzRS1}65uydS z~Te*3@?o8fih=mZ`iI!hL5iv3?VUBLQv0X zLtu58MIE7Jbm?)NFUZuMN2_~eh_Sqq*56yIo!+d_zr@^c@UwR&*j!fati$W<=rGGN 
zD$X`$lI%8Qe+KzBU*y3O+;f-Csr4$?3_l+uJ=K@dxOfZ?3APc5_x2R=a^kLFoxt*_ z4)nvvP+(zwlT5WYi!4l7+HKqzmXKYyM9kL5wX$dTSFSN&)*-&8Q{Q$K-})rWMin8S zy*5G*tRYNqk7&+v;@+>~EIQgf_SB;VxRTQFcm5VtqtKZ)x=?-f+%OY(VLrXb^6*aP zP&0Nu@~l2L!aF8i2!N~fJiHyxRl?I1QNjB)`uP_DuaU?2W;{?0#RGKTr2qH5QqdhK zP__ojm4WV^PUgmrV)`~f>(769t3|13DrzdDeXxqN6XA|_GK*;zHU()a(20>X{y-x| z2P6Ahq;o=)Nge`l+!+xEwY`7Q(8V=93A9C+WS^W%p&yR)eiSX+lp)?*7&WSYSh4i> zJa6i5T9o;Cd5z%%?FhB?J{l+t_)c&_f86gZMU{HpOA=-KoU5lIL#*&CZ_66O5$3?# ztgjGLo`Y7bj&eYnK#5x1trB_6tpu4$EomotZLb*9l6P(JmqG`{z$?lNKgq?GAVhkA zvw!oFhLyX=$K=jTAMwDQ)E-8ZW5$X%P2$YB5aq!VAnhwGv$VR&;Ix#fu%xlG{|j_K zbEYL&bx%*YpXcaGZj<{Y{k@rsrFKh7(|saspt?OxQ~oj_6En(&!rTZPa7fLCEU~mA zB7tbVs=-;cnzv*#INgF_9f3OZhp8c5yk!Dy1+`uA7@eJfvd~g34~wKI1PW%h(y&nA zRwMni12AHEw36)C4Tr-pt6s82EJa^8N#bjy??F*rg4fS@?6^MbiY3;7x=gd~G|Hi& zwmG+pAn!aV>>nNfP7-Zn8BLbJm&7}&ZX+$|z5*5{{F}BRSxN=JKZTa#{ut$v0Z0Fs za@UjXo#3!wACv+p9k*^9^n+(0(YKIUFo`@ib@bjz?Mh8*+V$`c%`Q>mrc5bs4aEf4 zh0qtL1qNE|xQ9JrM}qE>X>Y@dQ?%` zBx(*|1FMzVY&~|dE^}gHJ37O9bjnk$d8vKipgcf+As(kt2cbxAR3^4d0?`}}hYO*O z{+L&>G>AYaauAxE8=#F&u#1YGv%`d*v+EyDcU2TnqvRE33l1r}p#Vmcl%n>NrYOqV z2Car_^^NsZ&K=a~bj%SZlfxzHAxX$>=Q|Zi;E0oyfhgGgqe1Sd5-E$8KV9=`!3jWZCb2crb;rvQ##iw}xm7Da za!H${ls5Ihwxkh^D)M<4Yy3bp<-0a+&KfV@CVd9X6Q?v)$R3*rfT@jsedSEhoV(vqv?R1E8oWV;_{l_+_6= zLjV^-bZU$D_ocfSpRxDGk*J>n4G6s-e>D8JK6-gA>aM^Hv8@)txvKMi7Pi#DS5Y?r zK0%+L;QJdrIPXS2 ztjWAxkSwt2xG$L)Zb7F??cjs!KCTF+D{mZ5e0^8bdu_NLgFHTnO*wx!_8#}NO^mu{FaYeCXGjnUgt_+B-Ru!2_Ue-0UPg2Y)K3phLmR<4 zqUCWYX!KDU!jYF6c?k;;vF@Qh^q(PWwp1ez#I+0>d7V(u_h|L+kX+MN1f5WqMLn!L z!c(pozt7tRQi&duH8n=t-|d)c^;%K~6Kpyz(o53IQ_J+aCapAif$Ek#i0F9U>i+94 zFb=OH5(fk-o`L(o|DyQ(hlozl*2cu#)Y(D*zgNMi1Z!DTex#w#)x(8A-T=S+eByJW z%-k&|XhdZOWjJ&(FTrZNWRm^pHEot_MRQ_?>tKQ&MB~g(&D_e>-)u|`Ot(4j=UT6? 
zQ&YMi2UnCKlBpwltP!}8a2NJ`LlfL=k8SQf69U)~=G;bq9<2GU&Q#cHwL|o4?ah1` z;fG)%t0wMC;DR?^!jCoKib_iiIjsxCSxRUgJDCE%0P;4JZhJCy)vR1%zRl>K?V6#) z2lDi*W3q9rA zo;yvMujs+)a&00~W<-MNj=dJ@4%tccwT<@+c$#CPR%#aE#Dra+-5eSDl^E>is2v^~ z8lgRwkpeU$|1LW4yFwA{PQ^A{5JY!N5PCZ=hog~|FyPPK0-i;fCl4a%1 z?&@&E-)b4cK)wjXGq|?Kqv0s7y~xqvSj-NpOImt{Riam*Z!wz-coZIMuQU>M%6ben z>P@#o^W;fizVd#?`eeEPs#Gz^ySqJn+~`Pq%-Ee6*X+E>!PJGU#rs6qu0z5{+?`-N zxf1#+JNk7e6AoJTdQwxs&GMTq?Djch_8^xL^A;9XggtGL>!@0|BRuIdE&j$tzvt7I zr@I@0<0io%lpF697s1|qNS|BsA>!>-9DVlgGgw2;;k;=7)3+&t!);W3ulPgR>#JiV zUerO;WxuJqr$ghj-veVGfKF?O7si#mzX@GVt+F&atsB@NmBoV4dK|!owGP005$7LN7AqCG(S+={YA- zn#I{UoP_$~Epc=j78{(!2NLN)3qSm-1&{F&1z4Dz&7Mj_+SdlR^Q5{J=r822d4A@?Rj~xATaWewHUOus{*C|KoH`G zHB8SUT06GpSt)}cFJ18!$Kp@r+V3tE_L^^J%9$&fcyd_AHB)WBghwqBEWW!oh@StV zDrC?ttu4#?Aun!PhC4_KF1s2#kvIh~zds!y9#PIrnk9BWkJpq}{Hlqi+xPOR&A1oP zB0~1tV$Zt1pQuHpJw1TAOS=3$Jl&n{n!a+&SgYVe%igUtvE>eHqKY0`e5lwAf}2x( zP>9Wz+9uirp7<7kK0m2&Y*mzArUx%$CkV661=AIAS=V=|xY{;$B7cS5q0)=oq0uXU z_roo90&gHSfM6@6kmB_FJZ)3y_tt0}7#PA&pWo@_qzdIMRa-;U*Dy>Oo#S_n61Fn! z%mrH%tRmvQvg%UqN_2(C#LSxgQ>m}FKLGG=uqJQuSkk=S@c~QLi4N+>lr}QcOuP&% zQCP^cRk&rk-@lpa0^Lcvdu`F*qE)-0$TnxJlwZf|dP~s8cjhL%>^+L~{umxl5Xr6@ z^7zVKiN1Xg;-h+kr4Yt2BzjZs-Mo54`pDbLc}fWq{34=6>U9@sBP~iWZE`+FhtU|x zTV}ajn*Hc}Y?3agQ+bV@oIRm=qAu%|zE;hBw7kCcDx{pm!_qCxfPX3sh5^B$k_2d` z6#rAeUZC;e-LuMZ-f?gHeZogOa*mE>ffs+waQ+fQl4YKoAyZii_!O0;h55EMzD{;) z8lSJvv((#UqgJ?SCQFqJ-UU?2(0V{;7zT3TW`u6GH6h4m3}SuAAj_K(raGBu>|S&Q zZGL?r9@caTbmRm7p=&Tv?Y1)60*9At38w)$(1c?4cpFY2RLyw9c<{OwQE{b@WI}FQ zTT<2HOF4222d%k70yL~x_d#6SNz`*%@4++8gYQ8?yq0T@w~bF@aOHL2)T4xj`AVps9k z?m;<2ClJh$B6~fOYTWIV*T9y1BpB1*C?dgE{%lVtIjw>4MK{wP6OKTb znbPWrkZjYCbr`GGa%Xo0h;iFPNJBI3fK5`wtJV?wq_G<_PZ<`eiKtvN$IKfyju*^t zXc}HNg>^PPZ16m6bfTpmaW5=qoSsj>3)HS}teRa~qj+Y}mGRE?cH!qMDBJ8 zJB!&-=MG8Tb;V4cZjI_#{>ca0VhG_P=j0kcXVX5)^Sdpk+LKNv#yhpwC$k@v^Am&! z_cz2^4Cc{_BC!K#zN!KEkPzviUFPJ^N_L-kHG6}(X#$>Q=9?!{$A(=B3)P?PkxG9gs#l! 
zo6TOHo$F|IvjTC3MW%XrDoc7;m-6wb9mL(^2(>PQXY53hE?%4FW$rTHtN`!VgH72U zRY)#?Y*pMA<)x3B-&fgWQ(TQ6S6nUeSY{9)XOo_k=j$<*mA=f+ghSALYwBw~!Egn!jtjubOh?6Cb-Zi3IYn*fYl()^3u zRiX0I{5QaNPJ9w{yh4(o#$geO7b5lSh<5ZaRg9_=aFdZjxjXv(_SCv^v-{ZKQFtAA}kw=GPC7l81GY zeP@0Da{aR#{6`lbI0ON0y#K=t|L*}MG_HSl$e{U;v=BSs{SU3(e*qa(l%rD;(zM^3 zrRgN3M#Sf(Cr9>v{FtB`8JBK?_zO+~{H_0$lLA!l{YOs9KQd4Zt<3*Ns7dVbT{1Ut z?N9{XkN(96?r(4BH~3qeiJ_CAt+h1}O_4IUF$S(5EyTyo=`{^16P z=VhDY!NxkDukQz>T`0*H=(D3G7Np*2P`s(6M*(*ZJa;?@JYj&_z`d5bap=KK37p3I zr5#`%aC)7fUo#;*X5k7g&gQjxlC9CF{0dz*m2&+mf$Sc1LnyXn9lpZ!!Bl!@hnsE5px};b-b-`qne0Kh;hziNC zXV|zH%+PE!2@-IrIq!HM2+ld;VyNUZiDc@Tjt|-1&kq}>muY;TA3#Oy zWdYGP3NOZWSWtx6?S6ES@>)_Yz%%nLG3P>Z7`SrhkZ?shTfrHkYI;2zAn8h65wV3r z^{4izW-c9!MTge3eN=~r5aTnz6*6l#sD68kJ7Nv2wMbL~Ojj0H;M`mAvk*`Q!`KI? z7nCYBqbu$@MSNd+O&_oWdX()8Eh|Z&v&dJPg*o-sOBb2hriny)< zd(o&&kZM^NDtV=hufp8L zCkKu7)k`+czHaAU567$?GPRGdkb4$37zlIuS&<&1pgArURzoWCbyTEl9OiXZBn4p<$48-Gekh7>e)v*?{9xBt z=|Rx!@Y3N@ffW5*5!bio$jhJ7&{!B&SkAaN`w+&3x|D^o@s{ZAuqNss8K;211tUWIi1B!%-ViYX+Ys6w)Q z^o1{V=hK#+tt&aC(g+^bt-J9zNRdv>ZYm9KV^L0y-yoY7QVZJ_ivBS02I|mGD2;9c zR%+KD&jdXjPiUv#t1VmFOM&=OUE2`SNm4jm&a<;ZH`cYqBZoAglCyixC?+I+}*ScG#;?SEAFob{v0ZKw{`zw*tX}<2k zoH(fNh!>b5w8SWSV}rQ*E24cO=_eQHWy8J!5;Y>Bh|p;|nWH|nK9+ol$k`A*u*Y^Uz^%|h4Owu}Cb$zhIxlVJ8XJ0xtrErT zcK;34CB;ohd|^NfmVIF=XlmB5raI}nXjFz;ObQ4Mpl_`$dUe7sj!P3_WIC~I`_Xy@ z>P5*QE{RSPpuV=3z4p3}dh>Dp0=We@fdaF{sJ|+_E*#jyaTrj-6Y!GfD@#y@DUa;& zu4Iqw5(5AamgF!2SI&WT$rvChhIB$RFFF|W6A>(L9XT{0%DM{L`knIQPC$4F`8FWb zGlem_>>JK-Fib;g*xd<-9^&_ue95grYH>5OvTiM;#uT^LVmNXM-n8chJBD2KeDV7t zbnv3CaiyN>w(HfGv86K5MEM{?f#BTR7**smpNZ}ftm+gafRSt=6fN$(&?#6m3hF!>e$X)hFyCF++Qvx(<~q3esTI zH#8Sv!WIl2<&~=B)#sz1x2=+KTHj=0v&}iAi8eD=M->H|a@Qm|CSSzH#eVIR3_Tvu zG8S**NFbz%*X?DbDuP(oNv2;Lo@#_y4k$W+r^#TtJ8NyL&&Rk;@Q}~24`BB)bgwcp z=a^r(K_NEukZ*|*7c2JKrm&h&NP)9<($f)eTN}3|Rt`$5uB0|!$Xr4Vn#i;muSljn zxG?zbRD(M6+8MzGhbOn%C`M#OcRK!&ZHihwl{F+OAnR>cyg~No44>vliu$8^T!>>*vYQJCJg=EF^lJ*3M^=nGCw`Yg@hCmP(Gq^=eCEE1!t-2>%Al{w@*c% zUK{maww*>K$tu;~I@ERb9*uU@LsIJ|&@qcb!&b 
zsWIvDo4#9Qbvc#IS%sV1_4>^`newSxEcE08c9?rHY2%TRJfK2}-I=Fq-C)jc`gzV( zCn?^noD(9pAf2MP$>ur0;da`>Hr>o>N@8M;X@&mkf;%2A*2CmQBXirsJLY zlX21ma}mKH_LgYUM-->;tt;6F?E5=fUWDwQhp*drQ%hH0<5t2m)rFP%=6aPIC0j$R znGI0hcV~}vk?^&G`v~YCKc7#DrdMM3TcPBmxx#XUC_JVEt@k=%3-+7<3*fTcQ>f~?TdLjv96nb66xj=wVQfpuCD(?kzs~dUV<}P+Fpd)BOTO^<*E#H zeE80(b~h<*Qgez(iFFOkl!G!6#9NZAnsxghe$L=Twi^(Q&48 zD0ohTj)kGLD){xu%pm|}f#ZaFPYpHtg!HB30>F1c=cP)RqzK2co`01O5qwAP zUJm0jS0#mci>|Nu4#MF@u-%-4t>oUTnn_#3K09Hrwnw13HO@9L;wFJ*Z@=gCgpA@p zMswqk;)PTXWuMC-^MQxyNu8_G-i3W9!MLd2>;cM+;Hf&w| zLv{p*hArp9+h2wsMqT5WVqkkc0>1uokMox{AgAvDG^YJebD-czexMB!lJKWllLoBI zetW2;;FKI1xNtA(ZWys!_un~+834+6y|uV&Lo%dKwhcoDzRADYM*peh{o`-tHvwWIBIXW`PKwS3|M>CW37Z2dr!uJWNFS5UwY4;I zNIy1^sr+@8Fob%DHRNa&G{lm?KWU7sV2x9(Ft5?QKsLXi!v6@n&Iyaz5&U*|hCz+d z9vu60IG<v6+^ZmBs_aN!}p|{f(ikVl&LcB+UY;PPz* zj84Tm>g5~-X=GF_4JrVmtEtm=3mMEL1#z+pc~t^Iify^ft~cE=R0TymXu*iQL+XLX zdSK$~5pglr3f@Lrcp`>==b5Z6r7c=p=@A5nXNacsPfr(5m;~ks@*Wu7A z%WyY$Pt*RAKHz_7cghHuQqdU>hq$vD?plol_1EU(Fkgyo&Q2&2e?FT3;H%!|bhU~D z>VX4-6}JLQz8g3%Bq}n^NhfJur~v5H0dbB^$~+7lY{f3ES}E?|JnoLsAG%l^%eu_PM zEl0W(sbMRB3rFeYG&tR~(i2J0)RjngE`N_Jvxx!UAA1mc7J>9)`c=`}4bVbm8&{A` z3sMPU-!r-8de=P(C@7-{GgB<5I%)x{WfzJwEvG#hn3ict8@mexdoTz*(XX!C&~}L* z^%3eYQ8{Smsmq(GIM4d5ilDUk{t@2@*-aevxhy7yk(wH?8yFz%gOAXRbCYzm)=AsM z?~+vo2;{-jkA%Pqwq&co;|m{=y}y2lN$QPK>G_+jP`&?U&Ubq~T`BzAj1TlC`%8+$ zzdwNf<3suPnbh&`AI7RAYuQ<#!sD|A=ky2?hca{uHsB|0VqShI1G3lG5g}9~WSvy4 zX3p~Us^f5AfXlBZ0hA;mR6aj~Q8yb^QDaS*LFQwg!!<|W!%WX9Yu}HThc7>oC9##H zEW`}UQ%JQ38UdsxEUBrA@=6R-v1P6IoIw8$8fw6F{OSC7`cOr*u?p_0*Jvj|S)1cd z-9T);F8F-Y_*+h-Yt9cQQq{E|y^b@r&6=Cd9j0EZL}Pj*RdyxgJentY49AyC@PM<< zl&*aq_ubX%*pqUkQ^Zsi@DqhIeR&Ad)slJ2g zmeo&+(g!tg$z1ao1a#Qq1J022mH4}y?AvWboI4H028;trScqDQrB36t!gs|uZS9}KG0}DD$ zf2xF}M*@VJSzEJ5>ucf+L_AtN-Ht=34g&C?oPP>W^bwoigIncKUyf61!ce!2zpcNT zj&;rPGI~q2!Sy>Q7_lRX*DoIs-1Cei=Cd=+Xv4=%bn#Yqo@C=V`|QwlF0Y- zONtrwpHQ##4}VCL-1ol(e<~KU9-ja^kryz!g!})y-2S5z2^gE$Isj8l{%tF=Rzy`r z^RcP7vu`jHgHLKUE957n3j+BeE(bf;f)Zw($XaU6rZ26Upl#Yv28=8Y`hew{MbH>* 
z-sGI6dnb5D&dUCUBS`NLAIBP!Vi!2+~=AU+)^X^IpOEAn#+ab=`7c z%7B|mZ>wU+L;^&abXKan&N)O;=XI#dTV|9OMYxYqLbtT#GY8PP$45Rm2~of+J>>HIKIVn(uQf-rp09_MwOVIp@6!8bKV(C#(KxcW z;Pesq(wSafCc>iJNV8sg&`!g&G55<06{_1pIoL`2<7hPvAzR1+>H6Rx0Ra%4j7H-<-fnivydlm{TBr06;J-Bq8GdE^Amo)ptV>kS!Kyp*`wUx=K@{3cGZnz53`+C zLco1jxLkLNgbEdU)pRKB#Pq(#(Jt>)Yh8M?j^w&RPUueC)X(6`@@2R~PV@G(8xPwO z^B8^+`qZnQr$8AJ7<06J**+T8xIs)XCV6E_3W+al18!ycMqCfV>=rW0KBRjC* zuJkvrv;t&xBpl?OB3+Li(vQsS(-TPZ)Pw2>s8(3eF3=n*i0uqv@RM^T#Ql7(Em{(~%f2Fw|Reg@eSCey~P zBQlW)_DioA*yxxDcER@_=C1MC{UswPMLr5BQ~T6AcRyt0W44ffJG#T~Fk}wU^aYoF zYTayu-s?)<`2H(w+1(6X&I4?m3&8sok^jpXBB<|ZENso#?v@R1^DdVvKoD?}3%@{}}_E7;wt9USgrfR3(wabPRhJ{#1es81yP!o4)n~CGsh2_Yj2F^z|t zk((i&%nDLA%4KFdG96pQR26W>R2^?C1X4+a*hIzL$L=n4M7r$NOTQEo+k|2~SUI{XL{ynLSCPe%gWMMPFLO{&VN2pom zBUCQ(30qj=YtD_6H0-ZrJ46~YY*A;?tmaGvHvS^H&FXUG4)%-a1K~ly6LYaIn+4lG zt=wuGLw!%h=Pyz?TP=?6O-K-sT4W%_|Nl~;k~YA^_`gqfe{Xw=PWn#9f1mNz)sFuL zJbrevo(DPgpirvGMb6ByuEPd=Rgn}fYXqeUKyM+!n(cKeo|IY%p!#va6`D8?A*{u3 zEeWw0*oylJ1X!L#OCKktX2|>-z3#>`9xr~azOH+2dXHRwdfnpri9|xmK^Q~AuY!Fg z`9Xx?hxkJge~)NVkPQ(VaW(Ce2pXEtgY*cL8i4E)mM(iz_vdm|f@%cSb*Lw{WbShh41VGuplex9E^VvW}irx|;_{VK=N_WF39^ zH4<*peWzgc)0UQi4fBk2{FEzldDh5+KlRd!$_*@eYRMMRb1gU~9lSO_>Vh-~q|NTD zL}X*~hgMj$*Gp5AEs~>Bbjjq7G>}>ki1VxA>@kIhLe+(EQS0mjNEP&eXs5)I;7m1a zmK0Ly*!d~Dk4uxRIO%iZ!1-ztZxOG#W!Q_$M7_DKND0OwI+uC;PQCbQ#k#Y=^zQve zTZVepdX>5{JSJb;DX3%3g42Wz2D@%rhIhLBaFmx#ZV8mhya}jo1u{t^tzoiQy=jJp zjY2b7D2f$ZzJx)8fknqdD6fd5-iF8e(V}(@xe)N=fvS%{X$BRvW!N3TS8jn=P%;5j zShSbzsLs3uqycFi3=iSvqH~}bQn1WQGOL4?trj(kl?+q2R23I42!ipQ&`I*&?G#i9 zWvNh8xoGKDt>%@i0+}j?Ykw&_2C4!aYEW0^7)h2Hi7$;qgF3;Go?bs=v)kHmvd|`R z%(n94LdfxxZ)zh$ET8dH1F&J#O5&IcPH3=8o;%>OIT6w$P1Yz4S!}kJHNhMQ1(prc zM-jSA-7Iq=PiqxKSWb+YbLB-)lSkD6=!`4VL~`ExISOh2ud=TI&SKfR4J08Bad&rj zcXxMpcNgOB?w$~L7l^wPcXxw$0=$oV?)`I44)}b#ChS`_lBQhvb6ks?HDr3tFgkg&td19?b8=!sETXtp=&+3T$cCwZe z0nAET-7561gsbBws$TVjP7QxY(NuBYXVn9~9%vyN-B#&tJhWgtL1B<%BTS*-2$xB` zO)cMDHoWsm%JACZF--Pa7oP;f!n%p`*trlpvZ!HKoB={l+-(8O;;eYv2A=ra 
z3U7rSMCkP_6wAy`l|Se(&5|AefXvV1E#XA(LT!% zjj4|~xlZ-kPLNeQLFyXb%$K}YEfCBvHA-Znw#dZSI6V%3YD{Wj2@utT5Hieyofp6Qi+lz!u)htnI1GWzvQsA)baEuw9|+&(E@p8M+#&fsX@Kf`_YQ>VM+40YLv`3-(!Z7HKYg@+l00WGr779i-%t`kid%e zDtbh8UfBVT3|=8FrNian@aR3*DTUy&u&05x%(Lm3yNoBZXMHWS7OjdqHp>cD>g!wK z#~R{1`%v$IP;rBoP0B0P><;dxN9Xr+fp*s_EK3{EZ94{AV0#Mtv?;$1YaAdEiq5)g zYME;XN9cZs$;*2p63Q9^x&>PaA1p^5m7|W?hrXp2^m;B@xg0bD?J;wIbm6O~Nq^^K z2AYQs@7k)L#tgUkTOUHsh&*6b*EjYmwngU}qesKYPWxU-z_D> zDWr|K)XLf_3#k_9Rd;(@=P^S^?Wqlwert#9(A$*Y$s-Hy)BA0U0+Y58zs~h=YtDKxY0~BO^0&9{?6Nny;3=l59(6ec9j(79M?P1cE zex!T%$Ta-KhjFZLHjmPl_D=NhJULC}i$}9Qt?nm6K6-i8&X_P+i(c*LI3mtl3 z*B+F+7pnAZ5}UU_eImDj(et;Khf-z^4uHwrA7dwAm-e4 zwP1$Ov3NP5ts+e(SvM)u!3aZMuFQq@KE-W;K6 zag=H~vzsua&4Sb$4ja>&cSJ)jjVebuj+?ivYqrwp3!5>ul`B*4hJGrF;!`FaE+wKo z#};5)euvxC1zX0-G;AV@R(ZMl=q_~u8mQ5OYl;@BAkt)~#PynFX#c1K zUQ1^_N8g+IZwUl*n0Bb-vvliVtM=zuMGU-4a8|_8f|2GEd(2zSV?aSHUN9X^GDA8M zgTZW06m*iAy@7l>F3!7+_Y3mj^vjBsAux3$%U#d$BT^fTf-7{Y z_W0l=7$ro5IDt7jp;^cWh^Zl3Ga1qFNrprdu#g=n9=KH!CjLF#ucU5gy6*uASO~|b z7gcqm90K@rqe({P>;ww_q%4}@bq`ST8!0{V08YXY)5&V!>Td)?j7#K}HVaN4FU4DZ z%|7OppQq-h`HJ;rw-BAfH* z1H$ufM~W{%+b@9NK?RAp-$(P0N=b<(;wFbBN0{u5vc+>aoZ|3&^a866X@el7E8!E7 z=9V(Ma**m_{DKZit2k;ZOINI~E$|wO99by=HO{GNc1t?nl8soP@gxk8)WfxhIoxTP zoO`RA0VCaq)&iRDN9yh_@|zqF+f07Esbhe!e-j$^PS57%mq2p=+C%0KiwV#t^%_hH zoO?{^_yk5x~S)haR6akK6d|#2TN& zfWcN zc7QAWl)E9`!KlY>7^DNw$=yYmmRto>w0L(~fe?|n6k2TBsyG@sI)goigj=mn)E)I* z4_AGyEL7?(_+2z=1N@D}9$7FYdTu;%MFGP_mEJXc2OuXEcY1-$fpt8m_r2B|<~Xfs zX@3RQi`E-1}^9N{$(|YS@#{ZWuCxo)91{k>ESD54g_LYhm~vlOK_CAJHeYFfuIVB^%cqCfvpy#sU8Do8u}# z>>%PLKOZ^+$H54o@brtL-hHorSKcsjk_ZibBKBgyHt~L z=T6?e0oLX|h!Z3lbkPMO27MM?xn|uZAJwvmX?Yvp#lE3sQFY)xqet>`S2Y@1t)Z*& z;*I3;Ha8DFhk=YBt~{zp=%%*fEC}_8?9=(-k7HfFeN^GrhNw4e?vx*#oMztnO*&zY zmRT9dGI@O)t^=Wj&Og1R3b%(m*kb&yc;i`^-tqY9(0t!eyOkH<$@~1lXmm!SJllE_ zr~{a&w|8*LI>Z^h!m%YLgKv06Js7j7RaoX}ZJGYirR<#4Mghd{#;38j3|V+&=ZUq#1$ zgZb-7kV)WJUko?{R`hpSrC;w2{qa`(Z4gM5*ZL`|#8szO=PV^vpSI-^K_*OQji^J2 
zZ_1142N}zG$1E0fI%uqHOhV+7%Tp{9$bAR=kRRs4{0a`r%o%$;vu!_Xgv;go)3!B#;hC5qD-bcUrKR&Sc%Zb1Y($r78T z=eG`X#IpBzmXm(o6NVmZdCQf6wzqawqI63v@e%3TKuF!cQ#NQbZ^?6K-3`_b=?ztW zA>^?F#dvVH=H-r3;;5%6hTN_KVZ=ps4^YtRk>P1i>uLZ)Ii2G7V5vy;OJ0}0!g>j^ z&TY&E2!|BDIf1}U(+4G5L~X6sQ_e7In0qJmWYpn!5j|2V{1zhjZt9cdKm!we6|Pp$ z07E+C8=tOwF<<}11VgVMzV8tCg+cD_z?u+$sBjwPXl^(Ge7y8-=c=fgNg@FxI1i5Y-HYQMEH z_($je;nw`Otdhd1G{Vn*w*u@j8&T=xnL;X?H6;{=WaFY+NJfB2(xN`G)LW?4u39;x z6?eSh3Wc@LR&yA2tJj;0{+h6rxF zKyHo}N}@004HA(adG~0solJ(7>?LoXKoH0~bm+xItnZ;3)VJt!?ue|~2C=ylHbPP7 zv2{DH()FXXS_ho-sbto)gk|2V#;BThoE}b1EkNYGT8U#0ItdHG>vOZx8JYN*5jUh5Fdr9#12^ zsEyffqFEQD(u&76zA^9Jklbiz#S|o1EET$ujLJAVDYF znX&4%;vPm-rT<8fDutDIPC@L=zskw49`G%}q#l$1G3atT(w70lgCyfYkg7-=+r7$%E`G?1NjiH)MvnKMWo-ivPSQHbk&_l5tedNp|3NbU^wk0SSXF9ohtM zUqXiOg*8ERKx{wO%BimK)=g^?w=pxB1Vu_x<9jKOcU7N;(!o3~UxyO+*ZCw|jy2}V*Z22~KhmvxoTszc+#EMWXTM6QF*ks% zW47#2B~?wS)6>_ciKe1Fu!@Tc6oN7e+6nriSU;qT7}f@DJiDF@P2jXUv|o|Wh1QPf zLG31d>@CpThA+Ex#y)ny8wkC4x-ELYCXGm1rFI=1C4`I5qboYgDf322B_Nk@#eMZ% znluCKW2GZ{r9HR@VY`>sNgy~s+D_GkqFyz6jgXKD)U|*eKBkJRRIz{gm3tUd*yXmR z(O4&#ZA*us6!^O*TzpKAZ#}B5@}?f=vdnqnRmG}xyt=)2o%<9jj>-4wLP1X-bI{(n zD9#|rN#J;G%LJ&$+Gl2eTRPx6BQC6Uc~YK?nMmktvy^E8#Y*6ZJVZ>Y(cgsVnd!tV z!%twMNznd)?}YCWyy1-#P|2Fu%~}hcTGoy>_uawRTVl=(xo5!%F#A38L109wyh@wm zdy+S8E_&$Gjm=7va-b7@Hv=*sNo0{i8B7=n4ex-mfg`$!n#)v@xxyQCr3m&O1Jxg! z+FXX^jtlw=utuQ+>Yj$`9!E<5-c!|FX(~q`mvt6i*K!L(MHaqZBTtuSA9V~V9Q$G? 
zC8wAV|#XY=;TQD#H;;dcHVb9I7Vu2nI0hHo)!_{qIa@|2}9d ztpC*Q{4Py~2;~6URN^4FBCBip`QDf|O_Y%iZyA0R`^MQf$ce0JuaV(_=YA`knEMXw zP6TbjYSGXi#B4eX=QiWqb3bEw-N*a;Yg?dsVPpeYFS*&AsqtW1j2D$h$*ZOdEb$8n0 zGET4Igs^cMTXWG{2#A7w_usx=KMmNfi4oAk8!MA8Y=Rh9^*r>jEV(-{I0=rc);`Y) zm+6KHz-;MIy|@2todN&F+Yv1e&b&ZvycbTHpDoZ>FIiUn+M-=%A2C(I*^Yx@VKf(Z zxJOny&WoWcyKodkeN^5))aV|-UBFw{?AGo?;NNFFcKzk+6|gYfA#FR=y@?;3IoQ zUMI=7lwo9gV9fRvYi}Nd)&gQw7(K3=a0#p27u6Q)7JlP#A)piUUF8B3Li&38Xk$@| z9OR+tU~qgd3T3322E))eV)hAAHYIj$TmhH#R+C-&E-}5Qd{3B}gD{MXnsrS;{Erv1 z6IyQ=S2qD>Weqqj#Pd65rDSdK54%boN+a?=CkR|agnIP6;INm0A*4gF;G4PlA^3%b zN{H%#wYu|!3fl*UL1~f+Iu|;cqDax?DBkZWSUQodSDL4Es@u6zA>sIm>^Aq-&X#X8 zI=#-ucD|iAodfOIY4AaBL$cFO@s(xJ#&_@ZbtU+jjSAW^g;_w`FK%aH_hAY=!MTjI zwh_OEJ_25zTQv$#9&u0A11x_cGd92E74AbOrD`~f6Ir9ENNQAV2_J2Ig~mHWhaO5a zc>fYG$zke^S+fBupw+klDkiljJAha z6DnTemhkf>hv`8J*W_#wBj-2w(cVtXbkWWtE(3j@!A-IfF?`r$MhVknTs3D1N`rYN zKth9jZtX#>v#%U@^DVN!;ni#n1)U&H_uB{6pcq7$TqXJX!Q0P7U*JUZyclb~)l*DS zOLpoQfW_3;a0S$#V0SOwVeeqE$Hd^L`$;l_~2giLYd?7!gUYIpOs!jqSL~pI)4`YuB_692~A z^T#YYQ_W3Rakk}$SL&{`H8mc{>j+3eKprw6BK`$vSSIn;s31M~YlJLApJ)+Gi1{^- zw96WnT9M0Vr_D=e=a}${raR{(35Q!g+8`}vOFj1e&Or(_wp2U2aVQP0_jP57 z2(R4E(E$n!xl<}Zx38wO;27wuQ`P#_j!}L2 z2qr;As4D4n2X$-Jd_-!fsbu_D(64i;c4cJnP576x_>Q4WNushFwkBV!kVd(AYFXe{ zaqO5`Qfr!#ETmE(B;u_&FITotv~W}QYFCI!&ENKIb1p4fg*Yv1)EDMb==EjHHWM#{ zGMpqb2-LXdHB@D~pE3|+B392Gh4q)y9jBd$a^&cJM60VEUnLtHQD5i-X6PVF>9m_k zDvG3P(?CzdaIrC8s4cu~N9MEb!Tt(g*GK~gIp1Gyeaw3b7#YPx_1T6i zRi#pAMr~PJKe9P~I+ARa$a!K~)t(4LaVbjva1yd;b1Yz2$7MMc`aLmMl(a^DgN(u? 
zq2o9&Gif@Tq~Yq+qDfx^F*nCnpuPv%hRFc$I!p74*quLt^M}D_rwl10uMTr!)(*=7 zSC5ea@#;l(h87k4T4x)(o^#l76P-GYJA(pOa&F9YT=fS<*O{4agzba^dIrh0hjls<~APlIz9{ zgRY{OMv2s|`;VCoYVj?InYoq^QWuA&*VDyOn@pPvK8l~g#1~~MGVVvtLDt}>id_Z` zn(ihfL?Y}Y4YX335m*Xx(y+bbukchHrM zycIGp#1*K3$!(tgTsMD2VyUSg^yvCwB8*V~sACE(yq2!MS6f+gsxv^GR|Q7R_euYx z&X+@@H?_oQddGxJYS&ZG-9O(X+l{wcw;W7srpYjZZvanY(>Q1utSiyuuonkjh5J0q zGz6`&meSuxixIPt{UoHVupUbFKIA+3V5(?ijn}(C(v>=v?L*lJF8|yRjl-m#^|krg zLVbFV6+VkoEGNz6he;EkP!Z6|a@n8?yCzX9>FEzLnp21JpU0x!Qee}lwVKA})LZJq zlI|C??|;gZ8#fC3`gzDU%7R87KZyd)H__0c^T^$zo@TBKTP*i{)Gp3E0TZ}s3mKSY zix@atp^j#QnSc5K&LsU38#{lUdwj%xF zcx&l^?95uq9on1m*0gp$ruu||5MQo)XaN>|ngV5Jb#^wWH^5AdYcn_1>H~XtNwJd3 zd9&?orMSSuj=lhO?6)Ay7;gdU#E}pTBa5wFu`nejq##Xd71BHzH2XqLA5 zeLEo;9$}~u0pEu@(?hXB_l;{jQ=7m?~mwj-ME~Tw-OHPrR7K2Xq9eCNwQO$hR z3_A?=`FJctNXA#yQEorVoh{RWxJbdQga zU%K##XEPgy?E|K(=o#IPgnbk7E&5%J=VHube|2%!Qp}@LznjE%VQhJ?L(XJOmFVY~ zo-az+^5!Ck7Lo<7b~XC6JFk>17*_dY;=z!<0eSdFD2L?CSp_XB+?;N+(5;@=_Ss3& zXse>@sA7hpq;IAeIp3hTe9^$DVYf&?)={zc9*hZAV)|UgKoD!1w{UVo8D)Htwi8*P z%#NAn+8sd@b{h=O)dy9EGKbpyDtl@NBZw0}+Wd=@65JyQ2QgU}q2ii;ot1OsAj zUI&+Pz+NvuRv#8ugesT<<@l4L$zso0AQMh{we$tkeG*mpLmOTiy8|dNYhsqhp+q*yfZA`Z)UC*(oxTNPfOFk3RXkbzAEPofVUy zZ3A%mO?WyTRh@WdXz+zD!ogo}gbUMV!YtTNhr zrt@3PcP%5F;_SQ>Ui`Gq-lUe&taU4*h2)6RDh@8G1$o!){k~3)DT87%tQeHYdO?B` zAmoJvG6wWS?=0(Cj?Aqj59`p(SIEvYyPGJ^reI z`Hr?3#U2zI7k0=UmqMD35l`>3xMcWlDv$oo6;b`dZq3d!~)W z=4Qk)lE8&>#HV>?kRLOHZYz83{u7?^KoXmM^pazj8`7OwQ=5I!==; zA!uN`Q#n=Drmzg}@^nG!mJp9ml3ukWk96^6*us*;&>s+7hWfLXtl?a}(|-#=P12>A zon1}yqh^?9!;on?tRd6Fk0knQSLl4vBGb87A_kJNDGyrnpmn48lz_%P{* z_G*3D#IR<2SS54L5^h*%=)4D9NPpji7DZ5&lHD|99W86QN_(|aJ<5C~PX%YB`Qt_W z>jF_Os@kI6R!ub4n-!orS(G6~mKL7()1g=Lf~{D!LR7#wRHfLxTjYr{*c{neyhz#U zbm@WBKozE+kTd+h-mgF+ELWqTKin57P;0b){ zii5=(B%S(N!Z=rAFGnM6iePtvpxB_Q9-oq_xH!URn2_d-H~i;lro8r{-g!k-Ydb6_w5K@FOV?zPF_hi z%rlxBv$lQi%bjsu^7KT~@u#*c$2-;AkuP)hVEN?W5MO8C9snj*EC&|M!aK6o12q3+ z8e?+dH17E!A$tRlbJW~GtMDkMPT=m1g-v67q{sznnWOI$`g(8E!Pf!#KpO?FETxLK 
z2b^8^@mE#AR1z(DT~R3!nnvq}LG2zDGoE1URR=A2SA z%lN$#V@#E&ip_KZL}Q6mvm(dsS?oHoRf8TWL~1)4^5<3JvvVbEsQqSa3(lF*_mA$g zv`LWarC79G)zR0J+#=6kB`SgjQZ2460W zN%lZt%M@=EN>Wz4I;eH>C0VnDyFe)DBS_2{h6=0ZJ*w%s)QFxLq+%L%e~UQ0mM9ud zm&|r){_<*Om%vlT(K9>dE(3AHjSYro5Y1I?ZjMqWyHzuCE0nyCn`6eq%MEt(aY=M2rIzHeMds)4^Aub^iTIT|%*izG4YH;sT`D9MR(eND-SB+e66LZT z2VX)RJsn${O{D48aUBl|(>ocol$1@glsxisc#GE*=DXHXA?|hJT#{;X{i$XibrA}X zFHJa+ssa2$F_UC(o2k2Z0vwx%Wb(<6_bdDO#=a$0gK2NoscCr;vyx?#cF)JjM%;a| z$^GIlIzvz%Hx3WVU481}_e4~aWcyC|j&BZ@uWW1`bH1y9EWXOxd~f-VE5DpueNofN zv7vZeV<*!A^|36hUE;`#x%MHhL(~?eZ5fhA9Ql3KHTWoAeO-^7&|2)$IcD1r5X#-u zN~N0$6pHPhop@t1_d`dO3#TC0>y5jm>8;$F5_A2& zt#=^IDfYv?JjPPTPNx2TL-Lrl82VClQSLWW_$3=XPbH}xM34)cyW5@lnxy=&h%eRq zv29&h^fMoxjsDnmua(>~OnX{Cq!7vM0M4Mr@_18|YuSKPBKUTV$s^So zc}JlAW&bVz|JY#Eyup6Ny{|P_s0Pq;5*tinH+>5Xa--{ z2;?2PBs((S4{g=G`S?B3Ien`o#5DmUVwzpGuABthYG~OKIY`2ms;33SN9u^I8i_H5`BQ%yOfW+N3r|ufHS_;U;TWT5z;b14n1gX%Pn`uuO z6#>Vl)L0*8yl|#mICWQUtgzeFp9$puHl~m&O+vj3Ox#SxQUa?fY*uK?A;00RiFg(G zK?g=7b5~U4QIK`C*um%=Sw=OJ1eeaV@WZ%hh-3<=lR#(Xesk%?)l4p(EpTwPvN99V@TT)!A8SeFTV+frN=r|5l?K#odjijx2nFgc3kI zC$hVs1S-!z9>xn9MZcRk0YXdYlf~8*LfH$IHKD59H&gLz%6 z#mAYSRJufbRi~LRadwM*G!O2>&U<^d`@<)otXZJJxT@G}4kTx0zPDVhVXwiU)$}5Y z`0iV`8EEh&GlUk&VY9m0Mqr*U&|^Bc?FB`<%{x-o0ATntwIA%(YDcxWs$C)%a%d_@ z?fx!Co+@3p7ha$|pWYD}p6#(PG%_h8K7sQjT_P~|3ZEH0DRxa3~bP&&lPMj3C~!H2QD zq>(f^RUFSqf6K3BMBFy$jiuoSE+DhEq$xLDb7{57 z0B|1pSjYJ5F@cHG%qDZ{ogL$P!BK&sR%zD`gbK#9gRZX17EtAJxN% zys^gb2=X9=7HP}N(iRqt(tot2yyeE%s;L}AcMh;~-W~s_eAe!gIUYdQz5j~T)0trh z>#1U$uOyyl%!Pi(gD&)uHe9Q^27_kHyFCC}n^-KL(=OxHqUfex1YS__RJh0m-S>eM zqAk`aSev*z1lI&-?CycgDm=bdQCp}RqS0_d-4Mf&>u2KyGFxKe8JM1N{GNWw0n$FL z1UDp(h0(1I2Jh9I`?IS}h4R~n zRwRz>8?$fFMB2{UPe^$Ifl;Oc>}@Q9`|8DCeR{?LUQLPfaMsxs8ps=D_aAXORZH~< zdcIOca-F;+D3~M+)Vi4h)I4O3<)$65yI)goQ_vk#fb;Uim>UI4Dv9#2b1;N_Wg>-F zNwKeMKY+su#~NL0uE%_$mw1%ddX2Qs2P!ncM+>wnz}OCQX1!q~oS?OqYU;&ESAAwP z452QWL0&u^mraF#=j_ZeBWhm&F|d!QjwRl^7=Bl7@(43=BkN=3{BRv#QHIk>Umc_w zvP>q|q{lJ=zs|W9%a@8%W>C@MYN1D5{(=Af31+pR#kB`cd0-YlQQTg}+ 
zL|_h=F9JQ|Gux5c0ehaffHNYLf8VwF+qnM6IjBEI_eceee;o;FY@#~FFVsZjBSp!j z8V*Bgmn{RK!!zqGc;jy)z@Zjo>5{%m1?K}fLEL$l6Dl4f=ye0wNI#)2L=^K(&18Gb zJoj8@WBB;P^T#V)I0`aDSy?$rJU{+-5472NyFp>;Vw43j@3Z=;D2eSfyw5*0Q+&ML zsV&&*3c3$pa`qcaGbEB0*CA~Wp3%PkF?B87FV&rWNb|@GU$LB;l|;YutU*k za1hjUL_BX%G^s;BuzRi4Hl?eqC2z&ZrKh1tZDwnufG$g$LX(j!h%F5(n8D@in3lnX z(*8+3ZT6TVYRcSpM1eMeCps=Fz8q%gyM&B=a7(Vf`4k3dN$IM+`BO^_7HZq4BR|7w z+5kOJ;9_$X%-~arA@qmXSzD|+NMh--%5-9u6t(M=f%&z$<_V#Y_lzn{E$MZZG)+A> zu2E`_Y(MBJ2l*AqvCUmU;yBT}#oQ{V=((mC-QGJwsCOH*a;{1JRTKv7DBNG+M!XL7(^jbv&Qy-o9HNFrmN)-`D3WFtXs>1vBOJpI(=x; zKhJlFdfMf^G#oU(w1+ucMKYPZaDp>$kt=wiYsBCjUY-uz<4JziB>6fXDSLH*2Y z&Px5y`#3!fF=c4>fCMdg-tX582pemU@ZxyFbznL8-=TTo1Sybg9>7h*J^9^~XxXJO z`k9v~=4amxl<;FCV9h2k%?^-ZUzQy^#{JleyH23o1S{r<+t#z6jKS<9rbAM96^1iY zi6{IjauB)UwBhC-_L(MzGCxhhv`?ryc zja_Uwi7$8l!}*vjJppGyp#Wz=*?;jC*xQ&J894rql5A$2giJRtV&DWQh#(+Vs3-5_ z69_tj(>8%z1VtVp>a74r5}j2rG%&;uaTQ|fr&r%ew-HO}76i8`&ki%#)~}q4Y|d$_ zfNp9uc#$#OEca>>MaY6rF`dB|5#S)bghf>>TmmE&S~IFw;PF0UztO6+R-0!TSC?QP z{b(RA_;q3QAPW^XN?qQqu{h<}Vfiv}Rr!lA$C79^1=U>+ng9Dh>v{`?AOZt>CrQ=o zI}=mSnR))8fJpO->rcX?H);oqSQUZ?sR!fH2SoFdcPm5*2y<_u;4h;BqcF*XbwWSv zcJN%!g|L(22Xp!^1?c;T&qm%rpkP&2EQC3JF+SENm$+@7#e!UKD1uQ{TDw43?!b!3 zUooS_rt=xJfa&h?c^hfV>YwQXre3qosz_^c#)FO~d!<)2o}Oxz5HWtr<)1Yw012v4 zhv0w(RfJspDnA^-6Jmr;GkWt%{mAYOm6yPb&Vl&rv@D^K&;#?=X{kaK5FhScNJ_3> z#5u(Saisq2(~pVlrfG#@kLM#Ot~5rZZc%B&h1=gen?R+#t^1bYKf zVvtefX=D$*)39e^2@!~A_}9c${Gf0?1;dk=!Itp#s%0>Io%k`9(bDeI-udd&E6Zfu zcaiv(h`DM3W3Mfda)fYwhB=8RAPkotVt5-z21Ij~Ot9A^SK-1u*zFVK&mF?q1;|wy zrF+XWs^5Q-%Z6I62gTwrRe#F>riVM#fv_TihxSJ6to1X7NVszgivoTa!fPfBBYj94 zuc2m zL_k-<1FoORng1i3mth0|ZzT1O9&X8W9LkyFWn#Ebm_hAPM%O zNC_$OQHe90; z+@DGs;NHgGW8%wjH$EpvQ-Hd! 
znZdIh#!H5nOStiOKNV8}QvY~=VMqtG&p$ByF&%pe_gR`|H5ULg47lk20(Xe=k8ptc zn%EmTI7k9gNE=!IN4WnbymtsKoHn2-cL65z^9cQOSp>XFzo;!h*x1s^0U!<{Y-VZ1 zXJ7zekkYf(`@dZ3F9|?O+*dUL4K4?0@V^>I2;k-a1%ZgY9w2|C5r0R5?80e-|&4yEwkklXmZ)!QSYG) zXBKOz|IPC2W_X!t^cgb^@D=|>r@x$f{3Y+`%NoDT^Y@JIuJ%jxe;es9vi`kJmbnPYT%X}rzs0K#=H)Q`)_L7%?KLLJP+0XJbL&JgdJE{i*){MOFSK z{7XUfXZR-Te}aE8RelNkQV0AQ7RC0TVE^o8c!~K^RQ4GY+xed`|A+zjZ(qij@~zLP zkS@Q0`rpM|UsnI6B;_+vw)^iA{n0%C7N~ql@KXNonIOUIHwgYg4Dcn>OOdc=rUl>M zVEQe|u$P=Kb)TL&-2#4t^Pg0pUQ)dj%6O)#3;zwOe~`_1$@Ef`;F+l=>NlAFFbBS0 zN))`LdKnA;OjQ{B+f;z>i|wCv-CmNs46S`8X-oKRl0V+pKZ%XJWO*6G`OMOs^xG_d zj_7-p06{fybw_P;UzX^eX5Pkcrm04%9rPFa56 zyZE \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! 
-x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ 
$CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..f9553162 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. 
+set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/notification/build.gradle b/notification/build.gradle new file mode 100644 index 00000000..633ab8c6 --- /dev/null +++ b/notification/build.gradle @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +apply plugin: 'java' +apply plugin: 'jacoco' + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${es_version}" + compile "org.apache.httpcomponents:httpcore:4.4.5" + compile "org.apache.httpcomponents:httpclient:4.5.2" + + testImplementation "org.elasticsearch.test:framework:${es_version}" + testImplementation "org.easymock:easymock:4.0.1" +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/Notification.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/Notification.java new file mode 100644 index 00000000..7eb8f04a --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/Notification.java @@ -0,0 +1,47 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination; + +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.DestinationFactory; +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.DestinationFactoryProvider; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.BaseResponse; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * This is a client facing Notification class to publish the messages + * to the Notification channels like chime, slack, webhooks etc + */ +public class Notification { + private DestinationFactoryProvider factoryProvider; + + /** + * Publishes the notification message to the corresponding notification + * channel + * + * @param notificationMessage + * @return BaseResponse + */ + public static BaseResponse publish(BaseMessage notificationMessage) { + return AccessController.doPrivileged((PrivilegedAction) () -> { + DestinationFactory destinationFactory = DestinationFactoryProvider.getFactory(notificationMessage.getChannelType()); + return destinationFactory.publish(notificationMessage); + }); + } +} + diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/client/DestinationHttpClient.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/client/DestinationHttpClient.java new file mode 100644 index 00000000..ed221e88 --- /dev/null +++ 
package com.amazon.opendistroforelasticsearch.alerting.destination.client;

import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage;
import com.amazon.opendistroforelasticsearch.alerting.destination.message.CustomWebhookMessage;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;

/**
 * Handles HTTP connections to notification destinations (Chime, Slack,
 * custom webhooks). All destinations share one pooled client.
 */
public class DestinationHttpClient {

    private static final Logger logger = Loggers.getLogger(DestinationHttpClient.class);

    private static final int MAX_CONNECTIONS = 60;
    private static final int MAX_CONNECTIONS_PER_ROUTE = 20;
    private static final int TIMEOUT_MILLISECONDS = (int) TimeValue.timeValueSeconds(5).millis();
    private static final int SOCKET_TIMEOUT_MILLISECONDS = (int) TimeValue.timeValueSeconds(50).millis();

    // Not final only so that tests can swap in a mock via setHttpClient().
    private static CloseableHttpClient HTTP_CLIENT = createHttpClient();

    private static CloseableHttpClient createHttpClient() {
        RequestConfig config = RequestConfig.custom()
                .setConnectTimeout(TIMEOUT_MILLISECONDS)
                .setConnectionRequestTimeout(TIMEOUT_MILLISECONDS)
                .setSocketTimeout(SOCKET_TIMEOUT_MILLISECONDS)
                .build();

        PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
        connectionManager.setMaxTotal(MAX_CONNECTIONS);
        connectionManager.setDefaultMaxPerRoute(MAX_CONNECTIONS_PER_ROUTE);

        return HttpClientBuilder.create()
                .setDefaultRequestConfig(config)
                .setConnectionManager(connectionManager)
                .setRetryHandler(new DefaultHttpRequestRetryHandler())
                .build();
    }

    /**
     * Posts the message to its destination and returns the response body.
     *
     * @param message message to deliver
     * @return response body, or "{}" when the destination returned no entity
     * @throws IOException when the destination answers with a non-2xx status
     */
    public String execute(BaseMessage message) throws Exception {
        CloseableHttpResponse response = null;
        try {
            response = getHttpResponse(message);
            validateResponseStatus(response);
            return getResponseString(response);
        } finally {
            if (response != null) {
                // Always drain the entity so the pooled connection is released,
                // even when status validation threw.
                EntityUtils.consumeQuietly(response.getEntity());
            }
        }
    }

    private CloseableHttpResponse getHttpResponse(BaseMessage message) throws Exception {
        URI uri;
        HttpPost httpPostRequest = new HttpPost();
        if (message instanceof CustomWebhookMessage) {
            CustomWebhookMessage customWebhookMessage = (CustomWebhookMessage) message;
            uri = buildUri(customWebhookMessage.getUrl(), customWebhookMessage.getScheme(), customWebhookMessage.getHost(),
                    customWebhookMessage.getPort(), customWebhookMessage.getPath(), customWebhookMessage.getQueryParams());

            // Custom webhooks may carry user-supplied headers; default to JSON.
            Map<String, String> headerParams = customWebhookMessage.getHeaderParams();
            if (headerParams == null || headerParams.isEmpty()) {
                httpPostRequest.setHeader("Content-Type", "application/json");
            } else {
                for (Map.Entry<String, String> e : headerParams.entrySet()) {
                    httpPostRequest.setHeader(e.getKey(), e.getValue());
                }
            }
        } else {
            uri = buildUri(message.getUrl().trim(), null, null, -1, null, null);
        }

        httpPostRequest.setURI(uri);
        // NOTE(review): StringEntity without an explicit charset encodes as
        // ISO-8859-1; confirm whether non-ASCII message content is expected.
        StringEntity entity = new StringEntity(extractBody(message));
        httpPostRequest.setEntity(entity);

        return HTTP_CLIENT.execute(httpPostRequest);
    }

    private URI buildUri(String endpoint, String scheme, String host,
                         int port, String path, Map<String, String> queryParams)
            throws Exception {
        try {
            if (Strings.isNullOrEmpty(endpoint)) {
                // No fully qualified URL: assemble one from the parts.
                logger.info("endpoint empty. Fall back to host:port/path");
                if (Strings.isNullOrEmpty(scheme)) {
                    scheme = "https";
                }
                URIBuilder uriBuilder = new URIBuilder();
                if (queryParams != null) {
                    for (Map.Entry<String, String> e : queryParams.entrySet()) {
                        uriBuilder.addParameter(e.getKey(), e.getValue());
                    }
                }
                return uriBuilder.setScheme(scheme).setHost(host).setPort(port).setPath(path).build();
            }
            return new URIBuilder(endpoint).build();
        } catch (URISyntaxException exception) {
            // Keep the cause so the caller can see what was malformed.
            logger.error("Error occurred while building URI", exception);
            throw new IllegalStateException("Error creating URI", exception);
        }
    }

    public String getResponseString(CloseableHttpResponse response) throws IOException {
        HttpEntity entity = response.getEntity();
        if (entity == null) {
            return "{}";
        }

        String responseString = EntityUtils.toString(entity);
        logger.debug("Http response: " + responseString);

        return responseString;
    }

    private void validateResponseStatus(HttpResponse response) throws IOException {
        int statusCode = response.getStatusLine().getStatusCode();

        // Accept any 2xx: webhook endpoints commonly answer 201/204, not just 200.
        if (statusCode < RestStatus.OK.getStatus()
                || statusCode >= RestStatus.MULTIPLE_CHOICES.getStatus()) {
            throw new IOException("Failed: " + response);
        }
    }

    private String extractBody(BaseMessage message) {
        return message.getMessageContent();
    }

    /*
     * This method is useful for mocking the client in unit tests only.
     * It replaces the shared static client for ALL instances.
     */
    public void setHttpClient(CloseableHttpClient httpClient) {
        HTTP_CLIENT = httpClient;
    }
}
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.client; + +/** + * This class provides Client to the relevant destinations + */ +public final class DestinationHttpClientPool { + + private static final DestinationHttpClient httpClient = new DestinationHttpClient(); + + private DestinationHttpClientPool() { } + + public static DestinationHttpClient getHttpClient() { + return httpClient; + } +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/ChimeDestinationFactory.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/ChimeDestinationFactory.java new file mode 100644 index 00000000..80f6b30b --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/ChimeDestinationFactory.java @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.factory; + +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClientPool; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.ChimeMessage; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.rest.RestStatus; + +/** + * This class handles the client responsible for submitting the messages to Chime destination. + */ +public class ChimeDestinationFactory implements DestinationFactory{ + + private static final Logger logger = Loggers.getLogger(ChimeDestinationFactory.class); + + private DestinationHttpClient destinationHttpClient; + + public ChimeDestinationFactory() { + this.destinationHttpClient = DestinationHttpClientPool.getHttpClient(); + } + + @Override + public DestinationHttpResponse publish(ChimeMessage message) { + try { + String response = getClient(message).execute(message); + return new DestinationHttpResponse.Builder().withStatusCode(RestStatus.OK.getStatus()).withResponseContent(response).build(); + } catch (Exception ex) { + logger.error("Exception publishing Message: " + message.toString(), ex); + throw new IllegalStateException(ex); + } + } + + @Override + public DestinationHttpClient getClient(ChimeMessage message) { + return destinationHttpClient; + } + + /* + * This function can be used to mock the client for unit test + */ + public void setClient(DestinationHttpClient client) { + this.destinationHttpClient = client; + } +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/CustomWebhookDestinationFactory.java 
b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/CustomWebhookDestinationFactory.java new file mode 100644 index 00000000..2fed4709 --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/CustomWebhookDestinationFactory.java @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.factory; + +import com.amazon.opendistroforelasticsearch.alerting.destination.message.CustomWebhookMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClientPool; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.rest.RestStatus; + +/** + * This class handles the client responsible for submitting the messages to custom webhook destination. 
+ */ +public class CustomWebhookDestinationFactory implements DestinationFactory{ + + private static final Logger logger = Loggers.getLogger(CustomWebhookDestinationFactory.class); + + private DestinationHttpClient destinationHttpClient; + + public CustomWebhookDestinationFactory() { + this.destinationHttpClient = DestinationHttpClientPool.getHttpClient(); + } + + @Override + public DestinationHttpResponse publish(CustomWebhookMessage message) { + try { + String response = getClient(message).execute(message); + return new DestinationHttpResponse.Builder().withStatusCode(RestStatus.OK.getStatus()).withResponseContent(response).build(); + } catch (Exception ex) { + logger.error("Exception publishing Message: " + message.toString(), ex); + throw new IllegalStateException(ex); + } + } + + @Override + public DestinationHttpClient getClient(CustomWebhookMessage message) { + return destinationHttpClient; + } + + /* + * This function can be used to mock the client for unit test + */ + public void setClient(DestinationHttpClient client) { + this.destinationHttpClient = client; + } + +} \ No newline at end of file diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactory.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactory.java new file mode 100644 index 00000000..8ebd4880 --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactory.java @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.factory; + +import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.DestinationType; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.BaseResponse; + +/** + * Interface which enables to plug in multiple notification Channel Factories. + * + * @param message object of type [{@link DestinationType}] + * @param client to publish above message + */ +public interface DestinationFactory { + BaseResponse publish(T message); + + Y getClient(T message); +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactoryProvider.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactoryProvider.java new file mode 100644 index 00000000..149695ab --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/factory/DestinationFactoryProvider.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.alerting.destination.factory;

import com.amazon.opendistroforelasticsearch.alerting.destination.message.DestinationType;

import java.util.HashMap;
import java.util.Map;

/*
 * Fetches the right channel factory based on the type of the channel.
 * A channel could be Chime, Slack, a custom webhook etc.
 */
public class DestinationFactoryProvider {

    // Raw factory values: each entry is parameterized differently, so the
    // map cannot carry a single useful type argument for the value.
    private static Map<DestinationType, DestinationFactory> destinationFactoryMap = new HashMap<>();

    static {
        destinationFactoryMap.put(DestinationType.CHIME, new ChimeDestinationFactory());
        destinationFactoryMap.put(DestinationType.SLACK, new SlackDestinationFactory());
        destinationFactoryMap.put(DestinationType.CUSTOMWEBHOOK, new CustomWebhookDestinationFactory());
    }

    /**
     * Fetches the right channel factory based on the type of the channel.
     *
     * @param destinationType [{@link DestinationType}]
     * @return factory registered for the given destination type
     * @throws IllegalArgumentException when no factory is registered for the type
     */
    public static DestinationFactory getFactory(DestinationType destinationType) {
        DestinationFactory factory = destinationFactoryMap.get(destinationType);
        if (factory == null) {
            throw new IllegalArgumentException("Invalid channel type: " + destinationType);
        }
        return factory;
    }

    /*
     * This function is to mock hooks for the unit test
     */
    public static void setFactory(DestinationType type, DestinationFactory factory) {
        destinationFactoryMap.put(type, factory);
    }
}
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.factory; + +import com.amazon.opendistroforelasticsearch.alerting.destination.message.SlackMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient; +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClientPool; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.rest.RestStatus; + +/** + * This class handles the client responsible for submitting the messages to Slack destination. 
+ */ +public class SlackDestinationFactory implements DestinationFactory{ + + private DestinationHttpClient destinationHttpClient; + + private static final Logger logger = Loggers.getLogger(SlackDestinationFactory.class); + + public SlackDestinationFactory() { + this.destinationHttpClient = DestinationHttpClientPool.getHttpClient(); + } + + @Override + public DestinationHttpResponse publish(SlackMessage message) { + try { + String response = getClient(message).execute(message); + return new DestinationHttpResponse.Builder().withStatusCode(RestStatus.OK.getStatus()).withResponseContent(response).build(); + } catch (Exception ex) { + logger.error("Exception publishing Message: " + message.toString(), ex); + throw new IllegalStateException(ex); + } + } + + @Override + public DestinationHttpClient getClient(SlackMessage message) { + return destinationHttpClient; + } + + /* + * This function can be used to mock the client for unit test + */ + public void setClient(DestinationHttpClient client) { + this.destinationHttpClient = client; + } + +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/BaseMessage.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/BaseMessage.java new file mode 100644 index 00000000..fa6ff622 --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/BaseMessage.java @@ -0,0 +1,71 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.message; + +import org.elasticsearch.common.Strings; + +/** + * This class holds the generic parameters required for a + * message. + */ +public abstract class BaseMessage { + + protected DestinationType destinationType; + protected String destinationName; + protected String url; + private String content; + + BaseMessage(final DestinationType destinationType, final String destinationName, final String content) { + if (destinationType == null) { + throw new IllegalArgumentException("Channel type must be defined"); + } + if (!Strings.hasLength(destinationName)) { + throw new IllegalArgumentException("Channel name must be defined"); + } + this.destinationType = destinationType; + this.destinationName = destinationName; + this.content = content; + } + + BaseMessage(final DestinationType destinationType, final String destinationName, + final String content, final String url) { + this(destinationType, destinationName, content); + if (url == null) { + throw new IllegalArgumentException("url is invalid or empty"); + } + this.url = url; + } + + public void setUrl(String url) { + this.url = url; + } + public DestinationType getChannelType() { + return destinationType; + } + + public String getChannelName() { + return destinationName; + } + + public String getMessageContent() { + return content; + } + + public String getUrl() { + return url; + } + +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/ChimeMessage.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/ChimeMessage.java new file mode 100644 index 00000000..38b6613f --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/ChimeMessage.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 
Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.message; + +import org.elasticsearch.common.Strings; + +/** + * This class holds the contents of an Chime message + */ +public class ChimeMessage extends BaseMessage { + private String message; + private ChimeMessage(final DestinationType destinationType, + final String destinationName, + final String url, + final String message) { + + super(destinationType, destinationName, message, url); + + if (DestinationType.CHIME != destinationType) { + throw new IllegalArgumentException("Channel Type does not match CHIME"); + } + + if (Strings.isNullOrEmpty(message)) { + throw new IllegalArgumentException("Message content is missing"); + } + + this.message = message; + } + + @Override + public String toString() { + return "DestinationType: " + destinationType + ", DestinationName:" + destinationName + + ", Url: " + url + ", Message: " + message; + } + + public static class Builder { + private String message; + private DestinationType destinationType; + private String destinationName; + private String url; + + public Builder(String destinationName) { + this.destinationName = destinationName; + this.destinationType = DestinationType.CHIME; + } + + public ChimeMessage.Builder withMessage(String message) { + this.message = message; + return this; + } + + public ChimeMessage.Builder withUrl(String url) { + this.url = url; + return this; + } 
+ + public ChimeMessage build() { + ChimeMessage chimeMessage = new ChimeMessage(this.destinationType, this.destinationName, this.url, + this.message); + return chimeMessage; + } + } + + public String getUrl() { + return url; + } +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/CustomWebhookMessage.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/CustomWebhookMessage.java new file mode 100644 index 00000000..27a3140f --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/CustomWebhookMessage.java @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
package com.amazon.opendistroforelasticsearch.alerting.destination.message;

import org.elasticsearch.common.Strings;

import java.util.Map;

/**
 * Value object describing a message bound for a user-configured custom webhook.
 * Either a fully qualified URL or a host (plus optional scheme/port/path/query)
 * must be supplied.
 */
public class CustomWebhookMessage extends BaseMessage {

    private String message;
    private String scheme;
    private String host;
    private int port;
    private String path;
    private Map<String, String> queryParams;
    private Map<String, String> headerParams;
    private final String userName;
    private final String password;

    private CustomWebhookMessage(final DestinationType destinationType,
                                 final String destinationName,
                                 final String url,
                                 final String scheme,
                                 final String host,
                                 final Integer port,
                                 final String path,
                                 final Map<String, String> queryParams,
                                 final Map<String, String> headerParams,
                                 final String userName,
                                 final String password,
                                 final String message) {

        super(destinationType, destinationName, message);

        if (DestinationType.CUSTOMWEBHOOK != destinationType) {
            throw new IllegalArgumentException("Channel Type does not match CustomWebhook");
        }

        if (!Strings.isNullOrEmpty(url)) {
            // Store on the BaseMessage field; do NOT shadow it here, so
            // getUrl() and toString() agree on the trimmed value.
            setUrl(url.trim());
        }

        if (Strings.isNullOrEmpty(message)) {
            throw new IllegalArgumentException("Message content is missing");
        }

        this.scheme = Strings.isNullOrEmpty(scheme) ? "https" : scheme;
        // -1 lets the URI builder fall back to the scheme's default port.
        this.port = port == null ? -1 : port;

        if (!Strings.isNullOrEmpty(path)) {
            // Normalize to a leading slash; previously a path that already
            // started with "/" was silently dropped.
            this.path = path.startsWith("/") ? path : "/" + path;
        }

        if (Strings.isNullOrEmpty(url) && Strings.isNullOrEmpty(host)) {
            throw new IllegalArgumentException("Either fully qualified URL or host name should be provided");
        }

        this.message = message;
        this.host = host;
        this.queryParams = queryParams;
        this.headerParams = headerParams;
        this.userName = userName;
        this.password = password;
    }

    @Override
    public String toString() {
        return "DestinationType: " + destinationType + ", DestinationName:" + destinationName
                + ", Url: " + url + ", scheme: " + scheme + ", Host: " + host + ", Port: "
                + port + ", Path: " + path + ", Message: " + message;
    }

    /** Fluent builder; the channel type is fixed to CUSTOMWEBHOOK. */
    public static class Builder {
        private String message;
        private DestinationType destinationType;
        private String destinationName;
        private String url;
        private String scheme;
        private String host;
        private Integer port;
        private String path;
        private Map<String, String> queryParams;
        private Map<String, String> headerParams;
        private String userName;
        private String password;

        public Builder(String destinationName) {
            this.destinationName = destinationName;
            this.destinationType = DestinationType.CUSTOMWEBHOOK;
        }

        public Builder withScheme(String scheme) {
            this.scheme = scheme;
            return this;
        }

        public Builder withHost(String host) {
            this.host = host;
            return this;
        }

        public Builder withPort(Integer port) {
            this.port = port;
            return this;
        }

        public Builder withPath(String path) {
            this.path = path;
            return this;
        }

        public Builder withQueryParams(Map<String, String> queryParams) {
            this.queryParams = queryParams;
            return this;
        }

        public Builder withHeaderParams(Map<String, String> headerParams) {
            this.headerParams = headerParams;
            return this;
        }

        public Builder withMessage(String message) {
            this.message = message;
            return this;
        }

        public Builder withUrl(String url) {
            this.url = url;
            return this;
        }

        public Builder withUserName(String userName) {
            this.userName = userName;
            return this;
        }

        public Builder withPassword(String password) {
            this.password = password;
            return this;
        }

        public CustomWebhookMessage build() {
            return new CustomWebhookMessage(
                    this.destinationType, this.destinationName, this.url,
                    this.scheme, this.host, this.port, this.path, this.queryParams,
                    this.headerParams, this.userName, this.password, this.message);
        }
    }

    public String getScheme() {
        return scheme;
    }

    public String getHost() {
        return host;
    }

    public int getPort() {
        return port;
    }

    public String getPath() {
        return path;
    }

    public Map<String, String> getQueryParams() {
        return queryParams;
    }

    public Map<String, String> getHeaderParams() {
        return headerParams;
    }
}
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.message; + +/** + * Supported notification destinations + */ +public enum DestinationType { + CHIME, SLACK, CUSTOMWEBHOOK +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/SlackMessage.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/SlackMessage.java new file mode 100644 index 00000000..7ce4b2c7 --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/message/SlackMessage.java @@ -0,0 +1,90 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.message; + +import org.elasticsearch.common.Strings; + +/** + * This class holds the content of an Slack message + */ +public class SlackMessage extends BaseMessage { + private String message; + private SlackMessage(final DestinationType destinationType, + final String destinationName, + final String url, + final String message) { + + super(destinationType, destinationName, message, url); + + if (DestinationType.SLACK != destinationType) { + throw new IllegalArgumentException("Channel Type does not match Slack"); + } + + if (Strings.isNullOrEmpty(url)) { // add URL validation + throw new IllegalArgumentException("Fully qualified URL is missing/invalid: " + url); + } + + if (Strings.isNullOrEmpty(message)) { + throw new IllegalArgumentException("Message content is missing"); + } + + this.message = message; + } + + @Override + public String toString() { + return "DestinationType: " + destinationType + ", DestinationName:" + destinationName + + ", Url: " + url + ", Message: " + message; + } + + public static class Builder { + private String message; + private DestinationType destinationType; + private String destinationName; + private String url; + + public Builder(String channelName) { + this.destinationName = channelName; + this.destinationType = DestinationType.SLACK; + } + + public SlackMessage.Builder withMessage(String message) { + this.message = message; + return this; + } + + public SlackMessage.Builder withUrl(String url) { + this.url = url; + return this; + } + + public SlackMessage build() { + SlackMessage slackMessage = new SlackMessage(this.destinationType, + this.destinationName, + this.url, + this.message); + return slackMessage; + } + } + + public String getMessage() { + return message; + } + + public String getUrl() { + return url; + } +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/BaseResponse.java 
b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/BaseResponse.java new file mode 100644 index 00000000..575c93bd --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/BaseResponse.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.response; + +/** + * This class holds the generic response attributes + */ +public abstract class BaseResponse { + protected Integer statusCode; + + BaseResponse(final Integer statusCode) { + if (statusCode == null) { + throw new IllegalArgumentException("status code is invalid"); + } + this.statusCode = statusCode; + } + + public int getStatusCode() { + return statusCode; + } +} diff --git a/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/DestinationHttpResponse.java b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/DestinationHttpResponse.java new file mode 100644 index 00000000..b3308dee --- /dev/null +++ b/notification/src/main/java/com/amazon/opendistroforelasticsearch/alerting/destination/response/DestinationHttpResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination.response; + +import org.elasticsearch.common.Strings; + +/** + * This class is a place holder for http response metadata + */ +public class DestinationHttpResponse extends BaseResponse { + + private String responseContent; + + private DestinationHttpResponse(final String responseString, final int statusCode) { + super(statusCode); + if (Strings.isNullOrEmpty(responseString)) { + throw new IllegalArgumentException("Response is missing"); + } + this.responseContent = responseString; + } + + public static class Builder { + private String responseContent; + private Integer statusCode = null; + + public DestinationHttpResponse.Builder withResponseContent(String responseContent) { + this.responseContent = responseContent; + return this; + } + + public DestinationHttpResponse.Builder withStatusCode(Integer statusCode) { + this.statusCode = statusCode; + return this; + } + + public DestinationHttpResponse build() { + return new DestinationHttpResponse(responseContent, statusCode); + } + } + + public String getResponseContent() { + return this.responseContent; + } +} diff --git a/notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/ChimeDestinationTest.java b/notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/ChimeDestinationTest.java new file mode 100644 index 00000000..8f7a089b --- /dev/null +++ 
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazon.opendistroforelasticsearch.alerting.destination;

import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient;
import com.amazon.opendistroforelasticsearch.alerting.destination.factory.ChimeDestinationFactory;
import com.amazon.opendistroforelasticsearch.alerting.destination.factory.DestinationFactoryProvider;
import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage;
import com.amazon.opendistroforelasticsearch.alerting.destination.message.ChimeMessage;
import com.amazon.opendistroforelasticsearch.alerting.destination.message.DestinationType;
import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicStatusLine;
import org.easymock.EasyMock;
import org.elasticsearch.rest.RestStatus;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

/**
 * Tests publishing notifications to a Chime destination.
 */
public class ChimeDestinationTest {

    /**
     * Publishes a Chime message through {@code Notification.publish} with the HTTP
     * layer fully mocked, and checks the parsed response content and status code.
     */
    @Test
    public void testChimeMessage() throws Exception {
        // Mock the HTTP client so the test makes no real network call.
        CloseableHttpClient mockHttpClient = EasyMock.createMock(CloseableHttpClient.class);

        DestinationHttpResponse expectedChimeResponse = new DestinationHttpResponse.Builder().withResponseContent("{}")
                .withStatusCode(RestStatus.OK.getStatus()).build();
        CloseableHttpResponse httpResponse = EasyMock.createMock(CloseableHttpResponse.class);
        EasyMock.expect(mockHttpClient.execute(EasyMock.anyObject(HttpPost.class))).andReturn(httpResponse);

        BasicStatusLine mockStatusLine = EasyMock.createMock(BasicStatusLine.class);

        // Record the expected interactions: one status-line read, entity may be read
        // any number of times (null entity presumably maps to the "{}" content above
        // inside DestinationHttpClient -- confirm against its implementation).
        EasyMock.expect(httpResponse.getStatusLine()).andReturn(mockStatusLine);
        EasyMock.expect(httpResponse.getEntity()).andReturn(null).anyTimes();
        EasyMock.expect(mockStatusLine.getStatusCode()).andReturn(RestStatus.OK.getStatus());
        EasyMock.replay(mockHttpClient);
        EasyMock.replay(httpResponse);
        EasyMock.replay(mockStatusLine);

        // Route CHIME messages through a factory backed by the mocked client.
        DestinationHttpClient httpClient = new DestinationHttpClient();
        httpClient.setHttpClient(mockHttpClient);
        ChimeDestinationFactory chimeDestinationFactory = new ChimeDestinationFactory();
        chimeDestinationFactory.setClient(httpClient);

        DestinationFactoryProvider.setFactory(DestinationType.CHIME, chimeDestinationFactory);

        String message = "{\"Content\":\"Message gughjhjlkh Body emoji test: :) :+1: " +
                "link test: http://sample.com email test: marymajor@example.com All member callout: " +
                "@All All Present member callout: @Present\"}";
        BaseMessage bm = new ChimeMessage.Builder("abc").withMessage(message).
                withUrl("https://abc/com").build();
        DestinationHttpResponse actualChimeResponse = (DestinationHttpResponse) Notification.publish(bm);

        assertEquals(expectedChimeResponse.getResponseContent(), actualChimeResponse.getResponseContent());
        assertEquals(expectedChimeResponse.getStatusCode(), actualChimeResponse.getStatusCode());
    }

    /**
     * Building a ChimeMessage without a URL must fail with IllegalArgumentException.
     * The message text asserted here comes from ChimeMessage/BaseMessage validation
     * (not visible in this file) -- the assert pins that contract.
     */
    @Test(expected = IllegalArgumentException.class)
    public void testUrlMissingMessage() {
        try {
            ChimeMessage message = new ChimeMessage.Builder("chime")
                    .withMessage("dummyMessage").build();
        } catch (Exception ex) {
            assertEquals("url is invalid or empty", ex.getMessage());
            throw ex;
        }
    }

    /** Building a ChimeMessage without content must fail with IllegalArgumentException. */
    @Test(expected = IllegalArgumentException.class)
    public void testContentMissingMessage() {
        try {
            ChimeMessage message = new ChimeMessage.Builder("chime")
                    .withUrl("abc.com").build();
        } catch (Exception ex) {
            assertEquals("Message content is missing", ex.getMessage());
            throw ex;
        }
    }
}
+ */ + +package com.amazon.opendistroforelasticsearch.alerting.destination; + +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient; +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.CustomWebhookDestinationFactory; +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.DestinationFactoryProvider; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.CustomWebhookMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.DestinationType; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicStatusLine; +import org.easymock.EasyMock; +import org.elasticsearch.rest.RestStatus; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +public class CustomWebhookMessageTest { + + @Test + public void testCustomWebhookMessage() throws Exception { + + CloseableHttpClient mockHttpClient = EasyMock.createMock(CloseableHttpClient.class); + + DestinationHttpResponse expectedCustomWebhookResponse = new DestinationHttpResponse.Builder().withResponseContent("{}") + .withStatusCode(RestStatus.OK.getStatus()).build(); + CloseableHttpResponse httpResponse = EasyMock.createMock(CloseableHttpResponse.class); + EasyMock.expect(mockHttpClient.execute(EasyMock.anyObject(HttpPost.class))).andReturn(httpResponse); + + BasicStatusLine mockStatusLine = EasyMock.createMock(BasicStatusLine.class); + + EasyMock.expect(httpResponse.getStatusLine()).andReturn(mockStatusLine); + EasyMock.expect(httpResponse.getEntity()).andReturn(null).anyTimes(); + 
EasyMock.expect(mockStatusLine.getStatusCode()).andReturn(RestStatus.OK.getStatus()); + EasyMock.replay(mockHttpClient); + EasyMock.replay(httpResponse); + EasyMock.replay(mockStatusLine); + + DestinationHttpClient httpClient = new DestinationHttpClient(); + httpClient.setHttpClient(mockHttpClient); + CustomWebhookDestinationFactory customDestinationFactory = new CustomWebhookDestinationFactory(); + customDestinationFactory.setClient(httpClient); + + DestinationFactoryProvider.setFactory(DestinationType.CUSTOMWEBHOOK, customDestinationFactory); + + Map queryParams = new HashMap(); + queryParams.put("token", "R2x1UlN4ZHF8MXxxVFJpelJNVDgzdGNwMnVRenJwRFBHUkR0NlhROWhXOVVTZXpiTWx2azVr"); + + String message = "{\"Content\":\"Message gughjhjlkh Body emoji test: :) :+1: " + + "link test: http://sample.com email test: marymajor@example.com " + + "All member callout: @All All Present member callout: @Present\"}"; + BaseMessage bm = new CustomWebhookMessage.Builder("abc").withHost("hooks.chime.aws"). + withPath("incomingwebhooks/383c0e2b-d028-44f4-8d38-696754bc4574"). + withMessage(message). 
+ withQueryParams(queryParams).build(); + DestinationHttpResponse actualCustomResponse = (DestinationHttpResponse) Notification.publish(bm); + + assertEquals(expectedCustomWebhookResponse.getResponseContent(), actualCustomResponse.getResponseContent()); + assertEquals(expectedCustomWebhookResponse.getStatusCode(), actualCustomResponse.getStatusCode()); + } + + @Test(expected = IllegalArgumentException.class) + public void testUrlMissingMessage() { + try { + CustomWebhookMessage message = new CustomWebhookMessage.Builder("custom") + .withMessage("dummyMessage").build(); + } catch (Exception ex) { + assertEquals("Either fully qualified URL or host name should be provided", ex.getMessage()); + throw ex; + } + } + + @Test(expected = IllegalArgumentException.class) + public void testContentMissingMessage() { + try { + CustomWebhookMessage message = new CustomWebhookMessage.Builder("custom") + .withUrl("abc.com").build(); + } catch (Exception ex) { + assertEquals("Message content is missing", ex.getMessage()); + throw ex; + } + } +} diff --git a/notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/SlackDestinationTest.java b/notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/SlackDestinationTest.java new file mode 100644 index 00000000..3e8ee1c4 --- /dev/null +++ b/notification/src/test/java/com/amazon/opendistroforelasticsearch/alerting/destination/SlackDestinationTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package com.amazon.opendistroforelasticsearch.alerting.destination; + +import com.amazon.opendistroforelasticsearch.alerting.destination.client.DestinationHttpClient; +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.DestinationFactoryProvider; +import com.amazon.opendistroforelasticsearch.alerting.destination.factory.SlackDestinationFactory; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.BaseMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.DestinationType; +import com.amazon.opendistroforelasticsearch.alerting.destination.message.SlackMessage; +import com.amazon.opendistroforelasticsearch.alerting.destination.response.DestinationHttpResponse; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicStatusLine; +import org.easymock.EasyMock; +import org.elasticsearch.rest.RestStatus; +import org.junit.Assert; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class SlackDestinationTest { + + @Test + public void testSlackMessage() throws Exception { + + CloseableHttpClient mockHttpClient = EasyMock.createMock(CloseableHttpClient.class); + + DestinationHttpResponse expectedSlackResponse = new DestinationHttpResponse.Builder().withResponseContent("{}") + .withStatusCode(RestStatus.OK.getStatus()).build(); + CloseableHttpResponse httpResponse = EasyMock.createMock(CloseableHttpResponse.class); + EasyMock.expect(mockHttpClient.execute(EasyMock.anyObject(HttpPost.class))).andReturn(httpResponse); + + BasicStatusLine mockStatusLine = EasyMock.createMock(BasicStatusLine.class); + + EasyMock.expect(httpResponse.getStatusLine()).andReturn(mockStatusLine); + 
EasyMock.expect(httpResponse.getEntity()).andReturn(null).anyTimes(); + EasyMock.expect(mockStatusLine.getStatusCode()).andReturn(RestStatus.OK.getStatus()); + EasyMock.replay(mockHttpClient); + EasyMock.replay(httpResponse); + EasyMock.replay(mockStatusLine); + + DestinationHttpClient httpClient = new DestinationHttpClient(); + httpClient.setHttpClient(mockHttpClient); + SlackDestinationFactory slackDestinationFactory = new SlackDestinationFactory(); + slackDestinationFactory.setClient(httpClient); + + DestinationFactoryProvider.setFactory(DestinationType.SLACK, slackDestinationFactory); + + String message = "{\"text\":\"Vamshi Message gughjhjlkh Body emoji test: :) :+1: " + + "link test: http://sample.com email test: marymajor@example.com All member callout: " + + "@All All Present member callout: @Present\"}"; + BaseMessage bm = new SlackMessage.Builder("abc").withMessage(message). + withUrl("https://hooks.slack.com/services/xxxx/xxxxxx/xxxxxxxxx").build(); + + DestinationHttpResponse actualSlackResponse = (DestinationHttpResponse) Notification.publish(bm); + + assertEquals(expectedSlackResponse.getResponseContent(), actualSlackResponse.getResponseContent()); + assertEquals(expectedSlackResponse.getStatusCode(), actualSlackResponse.getStatusCode()); + } + + @Test(expected = IllegalArgumentException.class) + public void testUrlMissingMessage() { + try { + SlackMessage message = new SlackMessage.Builder("slack") + .withMessage("dummyMessage").build(); + } catch (Exception ex) { + Assert.assertEquals("url is invalid or empty", ex.getMessage()); + throw ex; + } + } + + @Test(expected = IllegalArgumentException.class) + public void testContentMissingMessage() { + try { + SlackMessage message = new SlackMessage.Builder("slack") + .withUrl("abc.com").build(); + } catch (Exception ex) { + assertEquals("Message content is missing", ex.getMessage()); + throw ex; + } + } +} \ No newline at end of file diff --git a/opendistro-elasticsearch-alerting.release-notes 
b/opendistro-elasticsearch-alerting.release-notes

## 2019-01-31, Version 0.7.0 (Current)


### New Features

This is the first release of the OpenDistro Elasticsearch Alerting plugin.

Allows users to create and schedule **monitors** to run periodic queries of data in Elasticsearch.
Results of periodic queries are evaluated against the monitor's **triggers** to see if they meet certain criteria.
If the criteria are met, **alerts** are generated and saved in an Elasticsearch index, and the user is notified by the trigger's **actions**.
Actions are messages, created by the user with Mustache templating, that are sent to **destinations**.
Destinations are locations where action messages are sent, such as an email server, Slack, Chime, or custom webhooks.
Alerts can be acknowledged to mute notifications.

Adds a backend REST API used for basic CRUD and search operations on monitors, testing monitors, and acknowledging alerts.

Adds a configuration API to enable/disable monitoring.

Adds a stats API to check the status of the plugin and ensure everything is working as expected.

Adds API support for creating, updating, and deleting destinations.

### Commits

* [[`4771e6c`](https://github.com/mauve-hedgehog/opendistro-elasticsearch-alerting/commit/4771e6c5ce6f541fc84f1290ac2fd43f64f3dcb2)] Initial release for OpenDistro Elasticsearch Alerting

settings.gradle:
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

// Gradle settings: declares the multi-project build layout.
rootProject.name = 'opendistro-plugins'

// Subprojects included in the build, identified by directory name.
include 'alerting'
include 'core'
include 'notification'
include 'elastic-api'

// Rename the subprojects so their Gradle project names (and published
// artifacts) carry the 'alerting-' prefix while the directories keep
// their short names.
project(":core").name = 'alerting-core'
project(":notification").name = 'alerting-notification'
project(":elastic-api").name = 'alerting-elastic-api'