From 548946a65b7528a933c5dca4a1afbdda9a640522 Mon Sep 17 00:00:00 2001 From: Jeffrey Chien Date: Fri, 22 Sep 2023 11:39:44 -0400 Subject: [PATCH 01/55] Restore workflows (#505) --- .github/repo_sync_pr_template.md | 46 ++++++++++ .github/workflows/alpha-release.yml | 25 ++++++ .github/workflows/apm-beta-pre-release.yml | 17 ++++ .../workflows/configurable-nonprod-test.yml | 33 +++++++ .github/workflows/deploy-canary.yml | 90 +++++++++++++++++++ ...hanced-container-insights-beta-release.yml | 17 ++++ .github/workflows/nonprod-release.yml | 17 ++++ .github/workflows/repo-sync.yml | 58 ++++++++++++ 8 files changed, 303 insertions(+) create mode 100644 .github/repo_sync_pr_template.md create mode 100644 .github/workflows/alpha-release.yml create mode 100644 .github/workflows/apm-beta-pre-release.yml create mode 100644 .github/workflows/configurable-nonprod-test.yml create mode 100644 .github/workflows/deploy-canary.yml create mode 100644 .github/workflows/enhanced-container-insights-beta-release.yml create mode 100644 .github/workflows/nonprod-release.yml create mode 100644 .github/workflows/repo-sync.yml diff --git a/.github/repo_sync_pr_template.md b/.github/repo_sync_pr_template.md new file mode 100644 index 0000000000..1bc6cea78a --- /dev/null +++ b/.github/repo_sync_pr_template.md @@ -0,0 +1,46 @@ +# Description of the issue +An automated PR to kickstart the process of syncing the latest changes from [cw-agent](https://github.com/aws/amazon-cloudwatch-agent/) + +# Description of changes + +### Follow the git CLI instructions resolve the merge conflicts + +```shell +git pull origin main +git checkout repo-sync-- +git merge main # do a regular merge -- we want to keep the commits +# resolve merge conflicts in your preferred IDE +git push -u origin repo-sync-- +``` + +Some useful commands +* [Restore conflict resolution in a single file](https://stackoverflow.com/questions/14409420/restart-undo-conflict-resolution-in-a-single-file) - `git checkout -m ` +* Total reset 
- `git merge --abort` + +### Related docs +* Resolving conflicts with: + * [Git CLI](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/addressing-merge-conflicts/resolving-a-merge-conflict-using-the-command-line) + * [IntelliJ](https://www.jetbrains.com/help/idea/resolving-conflicts.html#distributed-version-control-systems) + * [GoLand](https://www.jetbrains.com/help/go/resolve-conflicts.html) + * [VSCode](https://learn.microsoft.com/en-us/visualstudio/version-control/git-resolve-conflicts?view=vs-2022) + +### Best practices + +* Remember to update all references from `amazon-cloudwatch-agent` to `private-amazon-cloudwatch-agent-staging` +* Resolve the `go.sum` with `go mod tidy`. Don't bother manually resolving conflicts in this file +* When finished, ensure builds work by using `make build` or `make release` +* When unsure or blocked, do a deep dive on the `git blame` for greater context. Maybe even look for the associated PR's and ask the original authors and PR approvers +* If another automated PR arrives before your work is merged, just close your current one and save the branch +* After your PR is approved, **do a regular merge to preserve the commits**. +* Remember to cleanup your commits because none of them will be squashed in a regular merge + +# License +By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. + +# Tests +n/a + +# Requirements +_Before commit the code, please do the following steps._ +1. Run `make fmt` and `make fmt-sh` +2. Run `make lint` diff --git a/.github/workflows/alpha-release.yml b/.github/workflows/alpha-release.yml new file mode 100644 index 0000000000..c5d3c29fd6 --- /dev/null +++ b/.github/workflows/alpha-release.yml @@ -0,0 +1,25 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: CCWA Release + +on: + release: + types: [published] + +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "ccwa-release:latest" + BucketKey: "release" + PackageBucketKey: "release" + + DeployCanary: + needs: [BuildAndUpload] + uses: ./.github/workflows/deploy-canary.yml + secrets: inherit diff --git a/.github/workflows/apm-beta-pre-release.yml b/.github/workflows/apm-beta-pre-release.yml new file mode 100644 index 0000000000..12e201f29c --- /dev/null +++ b/.github/workflows/apm-beta-pre-release.yml @@ -0,0 +1,17 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: APM Beta Pre-Release +on: + workflow_dispatch: +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "apm-beta-pre-release:latest" + BucketKey: "apm-beta-pre-release" + PackageBucketKey: "apm-beta-pre-release" diff --git a/.github/workflows/configurable-nonprod-test.yml b/.github/workflows/configurable-nonprod-test.yml new file mode 100644 index 0000000000..df7f6bb85b --- /dev/null +++ b/.github/workflows/configurable-nonprod-test.yml @@ -0,0 +1,33 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: NonProd Test Build Binaries Configurable +on: + workflow_dispatch: + inputs: + ContainerPostfixTag: + # e.g. "ccwa_nonprod:test-${input goes here}" + description: "ECR repo name and tag" + required: true + type: string + BucketPostfixPath: + # e.g. s3:///nonprod/test/${input goes here} + description: "S3 URI to upload artifacts into." + required: true + type: string + PackageBucketPostfixPath: + # e.g. 
s3:///nonprod/test/${input goes here} + description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." + required: true + type: string +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "ccwa_nonprod:test-${{ inputs.ContainerPostfixTag }}" + BucketKey: "nonprod/test/${{ inputs.BucketPostfixPath }}" + PackageBucketKey: "nonprod/test/${{ github.PackageBucketPostfixPath }}" \ No newline at end of file diff --git a/.github/workflows/deploy-canary.yml b/.github/workflows/deploy-canary.yml new file mode 100644 index 0000000000..0bdebc8ac3 --- /dev/null +++ b/.github/workflows/deploy-canary.yml @@ -0,0 +1,90 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Deploy Canary +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} + KEY_NAME: ${{ secrets.KEY_NAME }} + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" + CWA_GITHUB_TEST_REPO_BRANCH: "main" + +on: + schedule: + - cron: "0 15 * * *" # Run daily at 15:00 UTC + workflow_call: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + DeployCanary: + name: "DeployCanary" + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Terminate Last Canary + run: | + if aws s3api wait 
object-exists --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate ; + then + cd terraform/ec2/linux + aws s3 cp s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate . + terraform --version + terraform init + terraform destroy -auto-approve + aws s3api delete-object --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate + fi + + # @TODO we can add a matrix in the future but for alpha we will only deploy to al2 + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/linux + terraform init + if terraform apply --auto-approve \ + -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ + -var="user=ec2-user" \ + -var="ami=cloudwatch-agent-integration-test-al2*" \ + -var="arc=amd64" \ + -var="binary_name=amazon-cloudwatch-agent.rpm" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="test_name=canary" \ + -var="is_canary=true" \ + -var="test_dir=./test/canary" ; then aws s3 cp terraform.tfstate s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate + else + terraform destroy -auto-approve && exit 1 + fi + + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/linux && terraform destroy --auto-approve diff --git a/.github/workflows/enhanced-container-insights-beta-release.yml b/.github/workflows/enhanced-container-insights-beta-release.yml new file mode 100644 index 0000000000..2aaf11e220 --- /dev/null +++ b/.github/workflows/enhanced-container-insights-beta-release.yml @@ -0,0 +1,17 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: Enhanced Container Insights Beta Release +on: + workflow_dispatch: +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "enhanced-container-insights-beta:latest" + BucketKey: "enhanced-container-insights-beta" + PackageBucketKey: "enhanced-container-insights-beta" diff --git a/.github/workflows/nonprod-release.yml b/.github/workflows/nonprod-release.yml new file mode 100644 index 0000000000..d8d4485c16 --- /dev/null +++ b/.github/workflows/nonprod-release.yml @@ -0,0 +1,17 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: NonProd Release +on: + workflow_dispatch: +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "ccwa_nonprod:latest" + BucketKey: "nonprod" + PackageBucketKey: "nonprod" \ No newline at end of file diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml new file mode 100644 index 0000000000..1bef136e90 --- /dev/null +++ b/.github/workflows/repo-sync.yml @@ -0,0 +1,58 @@ +# disable this workflow after beta phase +name: Sync with upstream + +on: + schedule: + - cron: "0 0 * * 0" # every sunday at 12AM + workflow_dispatch: + +env: + RUN_ID: "${{ github.run_number }}.${{ github.run_attempt }}" + UPSTREAM: "https://github.com/aws/amazon-cloudwatch-agent.git" + +jobs: + # gets the last commit hash from public/master and defines the branch name + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idoutputs + define-branch-name: + runs-on: ubuntu-latest + steps: + - name: Get last commit hash from public + id: get-last-commit + run: echo "hash=$(git ls-remote ${{ env.UPSTREAM }} HEAD | awk '{print $1;}')" >> $GITHUB_OUTPUT + outputs: + 
LAST_COMMIT: ${{ steps.get-last-commit.outputs.hash }} + PR_BRANCH: "repo-sync-${{ steps.get-last-commit.outputs.hash }}-${{ env.RUN_ID }}" + + # pushes the latest from public/main to private/repo-sync + # https://github.com/marketplace/actions/github-repo-sync + create-branch: + needs: define-branch-name + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + persist-credentials: false + - name: repo-sync + uses: repo-sync/github-sync@v2 + with: + source_repo: ${{ env.UPSTREAM }} + source_branch: "main" + destination_branch: ${{ needs.define-branch-name.outputs.PR_BRANCH }} + github_token: ${{ secrets.WILLIAZZ_PAT }} + + # upon create-branch completion, creates a PR from private/repo-sync to private/main + # https://github.com/marketplace/actions/github-pull-request-action + create-pr: + needs: [define-branch-name, create-branch] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: pull-request + uses: repo-sync/pull-request@v2 + with: + source_branch: ${{ needs.define-branch-name.outputs.PR_BRANCH }} + destination_branch: "main" + github_token: ${{ secrets.GITHUB_TOKEN }} + pr_title: "Automated sync with upstream - last commit ${{ needs.define-branch-name.outputs.LAST_COMMIT }} - run #${{ env.RUN_ID }}" + pr_template: ".github/repo_sync_pr_template.md" + pr_allow_empty: false From 350c310027cb8a6ff3beb9d0425fab1c8e16cbf5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 17:50:55 -0500 Subject: [PATCH 02/55] Automated sync with upstream - last commit 66f1a8cdb5d85d2778e2979c749e93e0cea81415 - run #78.1 (#528) Co-authored-by: Seth L <81644108+sethAmazon@users.noreply.github.com> Co-authored-by: Chad Patel --- .github/workflows/PR-build.yml | 4 ++-- .github/workflows/integration-test.yml | 6 +++--- .github/workflows/otel-fork-replace.yml | 2 +- .github/workflows/test-build.yml | 6 +++--- .../tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml | 
2 +- .../tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml | 2 +- .../tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml | 2 +- translator/translate/otel/exporter/awsemf/kubernetes.go | 2 +- .../translate/otel/exporter/awsemf/translator_test.go | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/PR-build.yml b/.github/workflows/PR-build.yml index 9aca3ef1a1..8d178b2a4f 100644 --- a/.github/workflows/PR-build.yml +++ b/.github/workflows/PR-build.yml @@ -47,7 +47,7 @@ jobs: if: needs.changes.outputs.lint == 'true' uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 cache: false - name: Check out code @@ -102,7 +102,7 @@ jobs: if: needs.changes.outputs.build == 'true' uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 cache: false - name: Check out code diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index fb2d8e2a19..ef27fe34d5 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -76,7 +76,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 - name: Generate matrix id: set-matrix @@ -129,7 +129,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v2 with: - go-version: ~1.19.2 + go-version: ~1.21.1 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -327,7 +327,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 - name: SetOutputs id: set-outputs diff --git a/.github/workflows/otel-fork-replace.yml b/.github/workflows/otel-fork-replace.yml index 15ecbcef7f..eb56e137d7 100644 --- a/.github/workflows/otel-fork-replace.yml +++ b/.github/workflows/otel-fork-replace.yml @@ -30,7 +30,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 cache: false - name: Update OTel fork components 
version id: set-matrix diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index cc9e89ded3..def7f549b8 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -63,7 +63,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 cache: false - name: Install rpm @@ -161,7 +161,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -226,7 +226,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.20.7 + go-version: ~1.21.1 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index b0f233bef0..72e0e72ccd 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -172,7 +172,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_storage_size_bytes - - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - etcd_request_duration_seconds - dimensions: [ [ClusterName, resource], [ ClusterName ] ] diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 69af6a05f5..6c57b41bc5 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -143,7 +143,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_storage_size_bytes - - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - etcd_request_duration_seconds - dimensions: 
[ [ ClusterName, resource ], [ ClusterName ] ] diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index b460f21958..888495f51b 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -172,7 +172,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_storage_size_bytes - - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - etcd_request_duration_seconds - dimensions: [ [ ClusterName, resource ], [ ClusterName ] ] diff --git a/translator/translate/otel/exporter/awsemf/kubernetes.go b/translator/translate/otel/exporter/awsemf/kubernetes.go index d9ae7a7674..342cd985a4 100644 --- a/translator/translate/otel/exporter/awsemf/kubernetes.go +++ b/translator/translate/otel/exporter/awsemf/kubernetes.go @@ -270,7 +270,7 @@ func getControlPlaneMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.Met Dimensions: [][]string{{"ClusterName", "endpoint"}, {"ClusterName"}}, MetricNameSelectors: []string{ "apiserver_storage_size_bytes", - "apiserver_storage_size_bytes", + "apiserver_storage_db_total_size_in_bytes", "etcd_db_total_size_in_bytes", "etcd_request_duration_seconds", }, diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index e935dcb231..3029d4b03c 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -341,7 +341,7 @@ func TestTranslator(t *testing.T) { }, { Dimensions: [][]string{{"ClusterName", "endpoint"}, {"ClusterName"}}, - MetricNameSelectors: []string{"apiserver_storage_size_bytes", "apiserver_storage_size_bytes", "etcd_db_total_size_in_bytes", "etcd_request_duration_seconds"}, + MetricNameSelectors: 
[]string{"apiserver_storage_size_bytes", "apiserver_storage_db_total_size_in_bytes", "etcd_db_total_size_in_bytes", "etcd_request_duration_seconds"}, }, { Dimensions: [][]string{{"ClusterName", "resource"}, {"ClusterName"}}, From 27d4c30fc9c26537788aca5499d9ffa49b68d859 Mon Sep 17 00:00:00 2001 From: Chad Patel Date: Tue, 3 Oct 2023 14:55:43 -0500 Subject: [PATCH 03/55] Create enhanced-container-insights-internal.yml --- .../enhanced-container-insights-internal.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/enhanced-container-insights-internal.yml diff --git a/.github/workflows/enhanced-container-insights-internal.yml b/.github/workflows/enhanced-container-insights-internal.yml new file mode 100644 index 0000000000..4ac6250bf8 --- /dev/null +++ b/.github/workflows/enhanced-container-insights-internal.yml @@ -0,0 +1,17 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Enhanced Container Insights Internal Test Release +on: + workflow_dispatch: +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "enhanced-container-insights-internal:latest" + BucketKey: "enhanced-container-insights-internal" + PackageBucketKey: "enhanced-container-insights-internal" From 5486731ab416c3541117c6fae47bf2364b793f4c Mon Sep 17 00:00:00 2001 From: Hyunsoo Kim <884273+movence@users.noreply.github.com> Date: Tue, 3 Oct 2023 15:57:42 -0400 Subject: [PATCH 04/55] Update GHA to release Enhanced CI to us-east-1 (#530) --- .../enhanced-container-insights-beta-release.yml | 13 ++++++++++++- .github/workflows/test-build.yml | 12 +++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/.github/workflows/enhanced-container-insights-beta-release.yml b/.github/workflows/enhanced-container-insights-beta-release.yml index 2aaf11e220..f08844262f 100644 
--- a/.github/workflows/enhanced-container-insights-beta-release.yml +++ b/.github/workflows/enhanced-container-insights-beta-release.yml @@ -5,7 +5,7 @@ name: Enhanced Container Insights Beta Release on: workflow_dispatch: jobs: - BuildAndUpload: + BuildAndPushToUSWEST2: uses: ./.github/workflows/test-build.yml secrets: inherit permissions: @@ -15,3 +15,14 @@ jobs: ContainerRepositoryNameAndTag: "enhanced-container-insights-beta:latest" BucketKey: "enhanced-container-insights-beta" PackageBucketKey: "enhanced-container-insights-beta" + BuildAndPushToUSEAST1: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "enhanced-container-insights-beta:latest" + BucketKey: "enhanced-container-insights-beta" + PackageBucketKey: "enhanced-container-insights-beta" + TargetRegion: "us-east-1" diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index def7f549b8..f67e1210e7 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -26,6 +26,11 @@ on: description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." required: true type: string + TargetRegion: + description: "Target region" + required: false + type: string + default: "us-west-2" workflow_call: inputs: ContainerRepositoryNameAndTag: @@ -46,6 +51,11 @@ on: description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." 
required: true type: string + TargetRegion: + description: "Target region" + required: false + type: string + default: "us-west-2" jobs: MakeBinary: @@ -73,7 +83,7 @@ jobs: uses: aws-actions/configure-aws-credentials@v2 with: role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 + aws-region: $${{ inputs.TargetRegion }} - name: Cache binaries id: cached_binaries From 7fc3c14802e851986ba7635ea818b78134cf1f93 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Thu, 5 Oct 2023 18:21:21 -0400 Subject: [PATCH 05/55] Create github action for apm release workflow (#537) (#538) --- .github/workflows/apm-beta-release.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/apm-beta-release.yml diff --git a/.github/workflows/apm-beta-release.yml b/.github/workflows/apm-beta-release.yml new file mode 100644 index 0000000000..5268d64c6b --- /dev/null +++ b/.github/workflows/apm-beta-release.yml @@ -0,0 +1,15 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: APM Beta Release + +on: + workflow_dispatch: + +jobs: + no-op: + runs-on: ubuntu-latest + steps: + - name: do nothing + run: | + echo "This is a no-op" \ No newline at end of file From d1a0ae12e71e008d18cba13a1c0a82ff3c56c8b0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 13:44:22 -0500 Subject: [PATCH 06/55] Automated sync with upstream - last commit d31f1fbbd8b64b6e4606bdb46e93ddce0de15f52 - run #81.1 (#542) Co-authored-by: Seth L <81644108+sethAmazon@users.noreply.github.com> Co-authored-by: Chad Patel Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Github Action Co-authored-by: Jeffrey Chien Co-authored-by: Adam <90734270+adam-mateen@users.noreply.github.com> --- .../amazon-cloudwatch-agent.go | 35 +++++++------ cmd/start-amazon-cloudwatch-agent/path.go | 38 +++++--------- .../path_windows.go | 36 ++----------- .../start-amazon-cloudwatch-agent.go | 46 ++++------------ go.mod | 28 +++++----- go.sum | 52 +++++++++---------- internal/retryer/imdsretryer.go | 6 +-- packaging/linux/amazon-cloudwatch-agent.spec | 2 + tool/paths/paths.go | 27 ++++++++++ tool/paths/paths_unix.go | 14 +++++ tool/paths/paths_windows.go | 28 ++++++++++ .../emf_and_kubernetes_config.yaml | 15 +++--- .../kubernetes_on_prem_config.yaml | 15 +++--- .../logs_and_kubernetes_config.yaml | 15 +++--- .../otel/exporter/awsemf/kubernetes.go | 22 ++++---- .../otel/exporter/awsemf/translator_test.go | 19 +++---- translator/util/ec2util/ec2util.go | 5 +- 17 files changed, 203 insertions(+), 200 deletions(-) create mode 100644 tool/paths/paths.go diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index 6fb7a62013..0cae50afe0 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ 
b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -42,6 +42,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/service/configprovider" "github.com/aws/amazon-cloudwatch-agent/service/defaultcomponents" "github.com/aws/amazon-cloudwatch-agent/service/registry" + "github.com/aws/amazon-cloudwatch-agent/tool/paths" ) const ( @@ -51,14 +52,14 @@ const ( var fDebug = flag.Bool("debug", false, "turn on debug logging") var pprofAddr = flag.String("pprof-addr", "", - "pprof address to listen on, not activate pprof if empty") + "pprof address to listen on, disabled by default, examples: 'localhost:1234', ':4567' (restricted to localhost)") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit") var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") var fSchemaTest = flag.Bool("schematest", false, "validate the toml file schema") -var fConfig = flag.String("config", "", "configuration file to load") -var fOtelConfig = flag.String("otelconfig", "", "YAML configuration file to run OTel pipeline") +var fTomlConfig = flag.String("config", "", "configuration file to load") +var fOtelConfig = flag.String("otelconfig", paths.YamlConfigPath, "YAML configuration file to run OTel pipeline") var fEnvConfig = flag.String("envconfig", "", "env configuration file to load") var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") @@ -80,8 +81,6 @@ var fAggregatorFilters = flag.String("aggregator-filter", "", "filter the aggregators to enable, separator is :") var fProcessorFilters = flag.String("processor-filter", "", "filter the processors to enable, separator is :") -var fUsage = flag.String("usage", "", - "print usage for a plugin, ie, 'telegraf --usage mysql'") var fService = flag.String("service", "", "operate on the service (windows only)") var fServiceName = 
flag.String("service-name", "telegraf", "service name (windows only)") @@ -137,7 +136,7 @@ func reloadLoop( } }(ctx) - if envConfigPath, err := getEnvConfigPath(*fConfig, *fEnvConfig); err == nil { + if envConfigPath, err := getEnvConfigPath(*fTomlConfig, *fEnvConfig); err == nil { // Reloads environment variables when file is changed go func(ctx context.Context, envConfigPath string) { var previousModTime time.Time @@ -181,17 +180,17 @@ func reloadLoop( // The "config-translator" program populates that file. func loadEnvironmentVariables(path string) error { if path == "" { - return fmt.Errorf("No env config file specified") + return fmt.Errorf("no env config file specified") } bytes, err := os.ReadFile(path) if err != nil { - return fmt.Errorf("Can't read env config file %s due to: %s", path, err.Error()) + return fmt.Errorf("cannot read env config file %s due to: %s", path, err.Error()) } envVars := map[string]string{} err = json.Unmarshal(bytes, &envVars) if err != nil { - return fmt.Errorf("Can't create env config due to: %s", err.Error()) + return fmt.Errorf("cannot create env config due to: %s", err.Error()) } for key, val := range envVars { @@ -203,7 +202,7 @@ func loadEnvironmentVariables(path string) error { func getEnvConfigPath(configPath, envConfigPath string) (string, error) { if configPath == "" { - return "", fmt.Errorf("No config file specified") + return "", fmt.Errorf("no config file specified") } //load the environment variables that's saved in json env config file if envConfigPath == "" { @@ -217,7 +216,7 @@ func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, ) error { - envConfigPath, err := getEnvConfigPath(*fConfig, *fEnvConfig) + envConfigPath, err := getEnvConfigPath(*fTomlConfig, *fEnvConfig) if err != nil { return err } @@ -437,6 +436,9 @@ func main() { parts := strings.Split(pprofHostPort, ":") if len(parts) == 2 && parts[0] == "" { pprofHostPort = fmt.Sprintf("localhost:%s", parts[1]) + } else if parts[0] 
!= "localhost" { + log.Printf("W! Not starting pprof, it is restricted to localhost:nnnn") + return } pprofHostPort = "http://" + pprofHostPort + "/debug/pprof" @@ -516,6 +518,9 @@ func main() { } envVars[parts[0]] = parts[1] bytes, err = json.MarshalIndent(envVars, "", "\t") + if err != nil { + log.Fatalf("E! Failed to marshal env config: %v", err) + } if err = os.WriteFile(*fEnvConfig, bytes, 0644); err != nil { log.Fatalf("E! Failed to update env config: %v", err) } @@ -550,8 +555,8 @@ func main() { // Handle the --service flag here to prevent any issues with tooling that // may not have an interactive session, e.g. installing from Ansible. if *fService != "" { - if *fConfig != "" { - svcConfig.Arguments = []string{"--config", *fConfig} + if *fTomlConfig != "" { + svcConfig.Arguments = []string{"--config", *fTomlConfig} } if *fConfigDirectory != "" { svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) @@ -602,7 +607,7 @@ func windowsRunAsService() bool { } func loadTomlConfigIntoAgent(c *config.Config) error { - err := c.LoadConfig(*fConfig) + err := c.LoadConfig(*fTomlConfig) if err != nil { return err } @@ -633,7 +638,7 @@ func validateAgentFinalConfigAndPlugins(c *config.Config) error { if *fSchemaTest { //up to this point, the given config file must be valid fmt.Println(agentinfo.FullVersion()) - fmt.Printf("The given config: %v is valid\n", *fConfig) + fmt.Printf("The given config: %v is valid\n", *fTomlConfig) os.Exit(0) } diff --git a/cmd/start-amazon-cloudwatch-agent/path.go b/cmd/start-amazon-cloudwatch-agent/path.go index 3383b06777..bf17673c52 100644 --- a/cmd/start-amazon-cloudwatch-agent/path.go +++ b/cmd/start-amazon-cloudwatch-agent/path.go @@ -24,13 +24,13 @@ func startAgent(writer io.WriteCloser) error { if os.Getenv(config.RUN_IN_CONTAINER) == config.RUN_IN_CONTAINER_TRUE { // Use exec so PID 1 changes to agent from start-agent. 
execArgs := []string{ - agentBinaryPath, // when using syscall.Exec, must pass binary name as args[0] - "-config", tomlConfigPath, - "-envconfig", envConfigPath, - "-otelconfig", yamlConfigPath, + paths.AgentBinaryPath, // when using syscall.Exec, must pass binary name as args[0] + "-config", paths.TomlConfigPath, + "-envconfig", paths.EnvConfigPath, + "-otelconfig", paths.YamlConfigPath, "-pidfile", paths.AgentDir + "/var/amazon-cloudwatch-agent.pid", } - if err := syscall.Exec(agentBinaryPath, execArgs, os.Environ()); err != nil { + if err := syscall.Exec(paths.AgentBinaryPath, execArgs, os.Environ()); err != nil { return fmt.Errorf("error exec as agent binary: %w", err) } // We should never reach this line but the compiler doesn't know... @@ -49,7 +49,7 @@ func startAgent(writer io.WriteCloser) error { return err } - name, err := exec.LookPath(agentBinaryPath) + name, err := exec.LookPath(paths.AgentBinaryPath) if err != nil { log.Printf("E! Failed to lookpath: %v ", err) return err @@ -62,10 +62,10 @@ func startAgent(writer io.WriteCloser) error { // linux command has pid passed while windows does not agentCmd := []string{ - agentBinaryPath, - "-config", tomlConfigPath, - "-envconfig", envConfigPath, - "-otelconfig", yamlConfigPath, + paths.AgentBinaryPath, + "-config", paths.TomlConfigPath, + "-envconfig", paths.EnvConfigPath, + "-otelconfig", paths.YamlConfigPath, "-pidfile", paths.AgentDir + "/var/amazon-cloudwatch-agent.pid", } if err = syscall.Exec(name, agentCmd, os.Environ()); err != nil { @@ -80,22 +80,8 @@ func startAgent(writer io.WriteCloser) error { func generateMergedJsonConfigMap() (map[string]interface{}, error) { ctx := context.CurrentContext() setCTXOS(ctx) - ctx.SetInputJsonFilePath(jsonConfigPath) - ctx.SetInputJsonDirPath(jsonDirPath) + ctx.SetInputJsonFilePath(paths.JsonConfigPath) + ctx.SetInputJsonDirPath(paths.JsonDirPath) ctx.SetMultiConfig("remove") return cmdutil.GenerateMergedJsonConfigMap(ctx) } - -func init() { - jsonConfigPath = 
paths.AgentDir + "/etc/" + JSON - jsonDirPath = paths.AgentDir + "/etc/" + paths.JsonDir - envConfigPath = paths.AgentDir + "/etc/" + ENV - tomlConfigPath = paths.AgentDir + "/etc/" + TOML - commonConfigPath = paths.AgentDir + "/etc/" + COMMON_CONFIG - yamlConfigPath = paths.AgentDir + "/etc/" + YAML - - agentLogFilePath = paths.AgentDir + "/logs/" + AGENT_LOG_FILE - - translatorBinaryPath = paths.AgentDir + "/bin/" + paths.TranslatorBinaryName - agentBinaryPath = paths.AgentDir + "/bin/" + paths.AgentBinaryName -} diff --git a/cmd/start-amazon-cloudwatch-agent/path_windows.go b/cmd/start-amazon-cloudwatch-agent/path_windows.go index 549058b5b4..19c01a2ef7 100644 --- a/cmd/start-amazon-cloudwatch-agent/path_windows.go +++ b/cmd/start-amazon-cloudwatch-agent/path_windows.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "log" - "os" "os/exec" "github.com/aws/amazon-cloudwatch-agent/tool/paths" @@ -23,40 +22,13 @@ func startAgent(writer io.WriteCloser) error { } cmd := exec.Command( - agentBinaryPath, - "-config", tomlConfigPath, - "-envconfig", envConfigPath, - "-otelconfig", yamlConfigPath, + paths.AgentBinaryPath, + "-config", paths.TomlConfigPath, + "-envconfig", paths.EnvConfigPath, + "-otelconfig", paths.YamlConfigPath, ) stdoutStderr, err := cmd.CombinedOutput() // log file is closed, so use fmt here fmt.Printf("%s \n", stdoutStderr) return err } - -func init() { - programFiles := os.Getenv("ProgramFiles") - var programData string - if _, ok := os.LookupEnv("ProgramData"); ok { - programData = os.Getenv("ProgramData") - } else { - // Windows 2003 - programData = os.Getenv("ALLUSERSPROFILE") + "\\Application Data" - } - - agentRootDir := programFiles + paths.AgentDir - agentConfigDir := programData + paths.AgentDir - - jsonConfigPath = agentConfigDir + "\\" + JSON - jsonDirPath = agentConfigDir + paths.JsonDir - envConfigPath = agentConfigDir + "\\" + ENV - tomlConfigPath = agentConfigDir + "\\" + TOML - yamlConfigPath = agentConfigDir + "\\" + YAML - - commonConfigPath 
= agentConfigDir + "\\" + COMMON_CONFIG - - agentLogFilePath = agentConfigDir + "\\Logs\\" + AGENT_LOG_FILE - - translatorBinaryPath = agentRootDir + "\\" + paths.TranslatorBinaryName - agentBinaryPath = agentRootDir + "\\" + paths.AgentBinaryName -} diff --git a/cmd/start-amazon-cloudwatch-agent/start-amazon-cloudwatch-agent.go b/cmd/start-amazon-cloudwatch-agent/start-amazon-cloudwatch-agent.go index d6fa6f0457..0c23787f06 100644 --- a/cmd/start-amazon-cloudwatch-agent/start-amazon-cloudwatch-agent.go +++ b/cmd/start-amazon-cloudwatch-agent/start-amazon-cloudwatch-agent.go @@ -14,47 +14,21 @@ import ( "gopkg.in/natefinch/lumberjack.v2" + "github.com/aws/amazon-cloudwatch-agent/tool/paths" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) -const ( - COMMON_CONFIG = "common-config.toml" - JSON = "amazon-cloudwatch-agent.json" - TOML = "amazon-cloudwatch-agent.toml" - YAML = "amazon-cloudwatch-agent.yaml" - ENV = "env-config.json" - - AGENT_LOG_FILE = "amazon-cloudwatch-agent.log" - - //TODO this CONFIG_DIR_IN_CONTAINER should change to something indicate dir, keep it for now to avoid break testing - CONFIG_DIR_IN_CONTAINER = "/etc/cwagentconfig" -) - -var ( - jsonConfigPath string - jsonDirPath string - envConfigPath string - tomlConfigPath string - commonConfigPath string - yamlConfigPath string - - agentLogFilePath string - - translatorBinaryPath string - agentBinaryPath string -) - // We use an environment variable here because we need this condition before the translator reads agent config json file. 
var runInContainer = os.Getenv(config.RUN_IN_CONTAINER) func translateConfig() error { - args := []string{"--output", tomlConfigPath, "--mode", "auto"} + args := []string{"--output", paths.TomlConfigPath, "--mode", "auto"} if runInContainer == config.RUN_IN_CONTAINER_TRUE { - args = append(args, "--input-dir", CONFIG_DIR_IN_CONTAINER) + args = append(args, "--input-dir", paths.CONFIG_DIR_IN_CONTAINER) } else { - args = append(args, "--input", jsonConfigPath, "--input-dir", jsonDirPath, "--config", commonConfigPath) + args = append(args, "--input", paths.JsonConfigPath, "--input-dir", paths.JsonDirPath, "--config", paths.CommonConfigPath) } - cmd := exec.Command(translatorBinaryPath, args...) + cmd := exec.Command(paths.TranslatorBinaryPath, args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stdout err := cmd.Run() @@ -83,7 +57,7 @@ func main() { if runInContainer != config.RUN_IN_CONTAINER_TRUE { writer = &lumberjack.Logger{ - Filename: agentLogFilePath, + Filename: paths.AgentLogFilePath, MaxSize: 100, //MB MaxBackups: 5, //backup files MaxAge: 7, //days @@ -96,10 +70,10 @@ func main() { if err := translateConfig(); err != nil { log.Fatalf("E! Cannot translate JSON, ERROR is %v \n", err) } - log.Printf("I! Config has been translated into TOML %s \n", tomlConfigPath) - printFileContents(tomlConfigPath) - log.Printf("I! Config has been translated into YAML %s \n", yamlConfigPath) - printFileContents(yamlConfigPath) + log.Printf("I! Config has been translated into TOML %s \n", paths.TomlConfigPath) + printFileContents(paths.TomlConfigPath) + log.Printf("I! Config has been translated into YAML %s \n", paths.YamlConfigPath) + printFileContents(paths.YamlConfigPath) if err := startAgent(writer); err != nil { log.Printf("E! 
Error when starting Agent, Error is %v \n", err) diff --git a/go.mod b/go.mod index 663325d7e7..494c82bbb8 100644 --- a/go.mod +++ b/go.mod @@ -6,34 +6,34 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20230928170322-0df38c533713 
+replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689 // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => 
github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231005180140-4b74f352a689 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20230928170322-0df38c533713 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => 
github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689 -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20230928170322-0df38c533713 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689 // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 // indirect @@ -422,3 +422,5 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) + +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231005180140-4b74f352a689 diff --git a/go.sum b/go.sum index 87964eb931..7fd27813d0 100644 --- a/go.sum +++ b/go.sum @@ -139,34 +139,34 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20230928170322-0df38c533713 h1:wt44oIHsyO9tNSw36V2oWZG4cTzrpqBe7JKyfXHBa5w= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20230928170322-0df38c533713/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20230928170322-0df38c533713 h1:/DboazGzxalMCGykP//7s3m1+YwUaXydlUzX8uIrXUg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20230928170322-0df38c533713/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20230928170322-0df38c533713 h1:+vsrfX+HryDhdmXbbsopiLiTQd4UKQgBEQqZ2qZ/qGw= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20230928170322-0df38c533713/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20230928170322-0df38c533713 h1:7yTgx0xsW4p1JW0aiGI1B0swLJET933/28PCwWODTr0= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20230928170322-0df38c533713/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20230928170322-0df38c533713 h1:oaZq2zS/clGw4fl59WihfY3nRRzmI5gN+7ogDYK9uB4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20230928170322-0df38c533713/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20230928170322-0df38c533713 h1:5u5narTayNWsEqMAlUYcJ+mPWY0J0J00zXTVkpFLUwU= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20230928170322-0df38c533713/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20230928170322-0df38c533713 h1:py3+pCTK1NW0GGarcMnZEgY6J3JxTgKB9BpSQ90r39k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s 
v0.0.0-20230928170322-0df38c533713/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20230928170322-0df38c533713 h1:TrDLQ2BdMFt+bUp6dfzOFhG57AcY85h27UTei+jUdr0= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20230928170322-0df38c533713/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20230928170322-0df38c533713 h1:N5zWIyEcfUr+t0EimIukJ0Z341YY0oew7ErHRJ1fnoc= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20230928170322-0df38c533713/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231005180140-4b74f352a689 h1:C41hepLgo3qoZ/raJJXwg5+61r1T0wKGe7ouq2GTDCQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231005180140-4b74f352a689/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689 h1:PB5QDEQZeC7Wuvmuwd6mlJbjAbU7d1jNe91Ro1sieBY= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689 h1:Hs0LHi+VUmWUlTBrc3dp8moDSsIElpbUfWWezE4yPdg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689 h1:PE+2GVB1EVsRaqShTpyunefheg82SklTOeLsYIXveus= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689 h1:H7WeHG6ERGOgPCS01RjwMo3Faja60PbozpPXDhx3OGI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689 h1:hpv83ENe81wZmLC93U90hT5WzbOzLCY8EhxAmvUjMuo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689 h1:MVjE0TuGzjWvWbphb/UQhhKz2+XtRjnhEJF20ht/D30= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689 h1:XYgIXvOXzjDUvkw0KQ8HjGmqW2+3fLFOhwtWRZcn5YU= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689 h1:UMqzAQZicKnFjs4M7QXXfMq1Tf/RopSD0g1kfZkgRvo= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 
h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20230928170322-0df38c533713 h1:SZf6K08K4cyoGDdkPthRGcsT1Gk6Z7o6QN01oM8Jr6A= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20230928170322-0df38c533713/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20230928170322-0df38c533713 h1:hDrhgnYst1yL2CLUmlwK1MqH4iqjLfxWZpw85X8cojE= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20230928170322-0df38c533713/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20230928170322-0df38c533713 h1:YPlfoYRq2+HxqibbpC3W+Go4WWJzVKEI5rziSHg/5Fc= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20230928170322-0df38c533713/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20230928170322-0df38c533713 h1:rAzBpcdCtVZqky+w3Nwv7tvTf7osX6WggT2/NEOHrkI= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20230928170322-0df38c533713/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689 h1:mSioY+8Y75youwy5RUzKmlR5K0SXKcsTsIPReetXyIs= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689/go.mod 
h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689 h1:vMz2KlUPl8FvCit6GN+K2mYGPjN5DbgU5u9q9o0YL0s= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689 h1:8lwu1RO1grkes/ZD4zQuNLF5FQ9jKlOQQ7vQUss4Buo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689 h1:PkIHZwFh7YGCEHK6Hb81VMb9J8UugN1KI3wMRoLfb2Q= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= diff --git a/internal/retryer/imdsretryer.go b/internal/retryer/imdsretryer.go index d7aaeb9dc4..29dec2976f 100644 --- a/internal/retryer/imdsretryer.go +++ b/internal/retryer/imdsretryer.go @@ -4,7 +4,7 @@ package retryer import ( - "log" + "fmt" "os" "strconv" @@ -27,7 +27,7 @@ type IMDSRetryer struct { // otel component layer retries should come from aws config settings // translator layer should come from env vars see GetDefaultRetryNumber() func NewIMDSRetryer(imdsRetries int) IMDSRetryer { - log.Printf("I! imds retry client will retry %d times", imdsRetries) + fmt.Printf("I! 
imds retry client will retry %d times", imdsRetries) return IMDSRetryer{ DefaultRetryer: client.DefaultRetryer{ NumMaxRetries: imdsRetries, @@ -43,7 +43,7 @@ func (r IMDSRetryer) ShouldRetry(req *request.Request) bool { if awsError, ok := req.Error.(awserr.Error); r.DefaultRetryer.ShouldRetry(req) || (ok && awsError != nil && awsError.Code() == "EC2MetadataError") { shouldRetry = true } - log.Printf("D! should retry %t for imds error : %v", shouldRetry, req.Error) + fmt.Printf("D! should retry %t for imds error : %v", shouldRetry, req.Error) return shouldRetry } diff --git a/packaging/linux/amazon-cloudwatch-agent.spec b/packaging/linux/amazon-cloudwatch-agent.spec index 4a897c54e5..b0e589e25e 100644 --- a/packaging/linux/amazon-cloudwatch-agent.spec +++ b/packaging/linux/amazon-cloudwatch-agent.spec @@ -10,6 +10,8 @@ Source: amazon-cloudwatch-agent.tar.gz %define _enable_debug_packages 0 %define debug_package %{nil} +%define _source_payload w6.gzdio +%define _binary_payload w6.gzdio %prep %setup -c %{name}-%{version} diff --git a/tool/paths/paths.go b/tool/paths/paths.go new file mode 100644 index 0000000000..26543934fb --- /dev/null +++ b/tool/paths/paths.go @@ -0,0 +1,27 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package paths + +const ( + COMMON_CONFIG = "common-config.toml" + JSON = "amazon-cloudwatch-agent.json" + TOML = "amazon-cloudwatch-agent.toml" + YAML = "amazon-cloudwatch-agent.yaml" + ENV = "env-config.json" + AGENT_LOG_FILE = "amazon-cloudwatch-agent.log" + //TODO this CONFIG_DIR_IN_CONTAINER should change to something indicate dir, keep it for now to avoid break testing + CONFIG_DIR_IN_CONTAINER = "/etc/cwagentconfig" +) + +var ( + JsonConfigPath string + JsonDirPath string + EnvConfigPath string + TomlConfigPath string + CommonConfigPath string + YamlConfigPath string + AgentLogFilePath string + TranslatorBinaryPath string + AgentBinaryPath string +) diff --git a/tool/paths/paths_unix.go b/tool/paths/paths_unix.go index 111b0d622f..c59c70232a 100644 --- a/tool/paths/paths_unix.go +++ b/tool/paths/paths_unix.go @@ -6,6 +6,8 @@ package paths +import "path/filepath" + const ( AgentDir = "/opt/aws/amazon-cloudwatch-agent" BinaryDir = "bin" @@ -15,3 +17,15 @@ const ( WizardBinaryName = "amazon-cloudwatch-agent-config-wizard" AgentStartName = "amazon-cloudwatch-agent-ctl" ) + +func init() { + JsonConfigPath = filepath.Join(AgentDir, "etc", JSON) + JsonDirPath = filepath.Join(AgentDir, "etc", JsonDir) + EnvConfigPath = filepath.Join(AgentDir, "etc", ENV) + TomlConfigPath = filepath.Join(AgentDir, "etc", TOML) + CommonConfigPath = filepath.Join(AgentDir, "etc", COMMON_CONFIG) + YamlConfigPath = filepath.Join(AgentDir, "etc", YAML) + AgentLogFilePath = filepath.Join(AgentDir, "logs", AGENT_LOG_FILE) + TranslatorBinaryPath = filepath.Join(AgentDir, "bin", TranslatorBinaryName) + AgentBinaryPath = filepath.Join(AgentDir, "bin", AgentBinaryName) +} diff --git a/tool/paths/paths_windows.go b/tool/paths/paths_windows.go index ae96449be2..69c51ac889 100644 --- a/tool/paths/paths_windows.go +++ b/tool/paths/paths_windows.go @@ -6,6 +6,11 @@ package paths +import ( + "os" + "path/filepath" +) + const ( AgentDir = 
"\\Amazon\\AmazonCloudWatchAgent\\" JsonDir = "\\Configs" @@ -15,3 +20,26 @@ const ( WizardBinaryName = "amazon-cloudwatch-agent-config-wizard.exe" AgentStartName = "amazon-cloudwatch-agent-ctl.ps1" ) + +func init() { + programFiles := os.Getenv("ProgramFiles") + var programData string + if _, ok := os.LookupEnv("ProgramData"); ok { + programData = os.Getenv("ProgramData") + } else { + // Windows 2003 + programData = filepath.Join(os.Getenv("ALLUSERSPROFILE"), "Application Data") + } + + AgentRootDir := filepath.Join(programFiles, AgentDir) + AgentConfigDir := filepath.Join(programData, AgentDir) + JsonConfigPath = filepath.Join(AgentConfigDir, JSON) + JsonDirPath = filepath.Join(AgentConfigDir, JsonDir) + EnvConfigPath = filepath.Join(AgentConfigDir, ENV) + TomlConfigPath = filepath.Join(AgentConfigDir, TOML) + YamlConfigPath = filepath.Join(AgentConfigDir, YAML) + CommonConfigPath = filepath.Join(AgentConfigDir, COMMON_CONFIG) + AgentLogFilePath = filepath.Join(AgentConfigDir, AGENT_LOG_FILE) + TranslatorBinaryPath = filepath.Join(AgentRootDir, TranslatorBinaryName) + AgentBinaryPath = filepath.Join(AgentRootDir, AgentBinaryName) +} diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 72e0e72ccd..7fe1138031 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -50,6 +50,8 @@ exporters: metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request - container_memory_utilization - container_memory_utilization_over_container_limit - container_memory_failures_total @@ -99,6 +101,8 @@ exporters: - pod_status_succeeded - pod_memory_request - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] 
label_matchers: [ ] @@ -174,7 +178,6 @@ exporters: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - - etcd_request_duration_seconds - dimensions: [ [ClusterName, resource], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -205,6 +208,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests - dimensions: [ [ClusterName, name], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -255,6 +259,9 @@ exporters: - metric_name: apiserver_request_total_5xx unit: Count overwrite: true + - metric_name: apiserver_requested_deprecated_apis + unit: Count + overwrite: true - metric_name: apiserver_storage_objects unit: Count overwrite: true @@ -264,9 +271,6 @@ exporters: - metric_name: apiserver_storage_list_duration_seconds unit: Seconds overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - metric_name: apiserver_storage_db_total_size_in_bytes unit: Bytes overwrite: true @@ -276,9 +280,6 @@ exporters: - metric_name: etcd_db_total_size_in_bytes unit: Bytes overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - metric_name: rest_client_request_duration_seconds unit: Seconds overwrite: true diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 6c57b41bc5..3cc73c82af 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -21,6 +21,8 @@ exporters: metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request - container_memory_utilization - container_memory_utilization_over_container_limit - container_memory_failures_total @@ -70,6 +72,8 @@ exporters: - 
pod_status_succeeded - pod_memory_request - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] label_matchers: [ ] @@ -145,7 +149,6 @@ exporters: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - - etcd_request_duration_seconds - dimensions: [ [ ClusterName, resource ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -176,6 +179,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests - dimensions: [ [ ClusterName, name ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -226,6 +230,9 @@ exporters: - metric_name: apiserver_request_total_5xx unit: Count overwrite: true + - metric_name: apiserver_requested_deprecated_apis + unit: Count + overwrite: true - metric_name: apiserver_storage_objects unit: Count overwrite: true @@ -235,9 +242,6 @@ exporters: - metric_name: apiserver_storage_list_duration_seconds unit: Seconds overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - metric_name: apiserver_storage_db_total_size_in_bytes unit: Bytes overwrite: true @@ -247,9 +251,6 @@ exporters: - metric_name: etcd_db_total_size_in_bytes unit: Bytes overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - metric_name: rest_client_request_duration_seconds unit: Seconds overwrite: true diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 888495f51b..5b038168ff 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -50,6 +50,8 @@ exporters: metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit + - 
container_cpu_limit + - container_cpu_request - container_memory_utilization - container_memory_utilization_over_container_limit - container_memory_failures_total @@ -99,6 +101,8 @@ exporters: - pod_status_succeeded - pod_memory_request - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] label_matchers: [ ] @@ -174,7 +178,6 @@ exporters: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes - etcd_db_total_size_in_bytes - - etcd_request_duration_seconds - dimensions: [ [ ClusterName, resource ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -205,6 +208,7 @@ exporters: label_matchers: [ ] metric_name_selectors: - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests - dimensions: [ [ ClusterName, name ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: @@ -255,6 +259,9 @@ exporters: - metric_name: apiserver_request_total_5xx unit: Count overwrite: true + - metric_name: apiserver_requested_deprecated_apis + unit: Count + overwrite: true - metric_name: apiserver_storage_objects unit: Count overwrite: true @@ -264,9 +271,6 @@ exporters: - metric_name: apiserver_storage_list_duration_seconds unit: Seconds overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - metric_name: apiserver_storage_db_total_size_in_bytes unit: Bytes overwrite: true @@ -276,9 +280,6 @@ exporters: - metric_name: etcd_db_total_size_in_bytes unit: Bytes overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - metric_name: rest_client_request_duration_seconds unit: Seconds overwrite: true diff --git a/translator/translate/otel/exporter/awsemf/kubernetes.go b/translator/translate/otel/exporter/awsemf/kubernetes.go index 342cd985a4..eb083953ad 100644 --- a/translator/translate/otel/exporter/awsemf/kubernetes.go +++ 
b/translator/translate/otel/exporter/awsemf/kubernetes.go @@ -60,7 +60,7 @@ func getContainerMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.Metric metricDeclaration := awsemfexporter.MetricDeclaration{ Dimensions: [][]string{{"ClusterName"}, {"ContainerName", "FullPodName", "PodName", "Namespace", "ClusterName"}, {"ContainerName", "PodName", "Namespace", "ClusterName"}}, MetricNameSelectors: []string{ - "container_cpu_utilization", "container_cpu_utilization_over_container_limit", + "container_cpu_utilization", "container_cpu_utilization_over_container_limit", "container_cpu_limit", "container_cpu_request", "container_memory_utilization", "container_memory_utilization_over_container_limit", "container_memory_failures_total", "container_memory_limit", "container_memory_request", "container_filesystem_usage", "container_filesystem_available", "container_filesystem_utilization", "container_status_running", "container_status_terminated", "container_status_waiting", "container_status_waiting_reason_crash_loop_back_off", @@ -94,7 +94,8 @@ func getPodMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclar podMetricDeclarations[0].Dimensions = append(podMetricDeclarations[0].Dimensions, []string{"FullPodName", "PodName", "Namespace", "ClusterName"}) selectors = append(selectors, []string{"pod_number_of_container_restarts", "pod_number_of_containers", "pod_number_of_running_containers", "pod_status_ready", "pod_status_scheduled", "pod_status_running", "pod_status_pending", "pod_status_failed", "pod_status_unknown", - "pod_status_succeeded", "pod_memory_request", "pod_memory_limit"}...) + "pod_status_succeeded", "pod_memory_request", "pod_memory_limit", "pod_cpu_limit", "pod_cpu_request", + }...) 
} @@ -272,7 +273,6 @@ func getControlPlaneMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.Met "apiserver_storage_size_bytes", "apiserver_storage_db_total_size_in_bytes", "etcd_db_total_size_in_bytes", - "etcd_request_duration_seconds", }, }, { @@ -315,6 +315,7 @@ func getControlPlaneMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.Met Dimensions: [][]string{{"ClusterName", "request_kind"}, {"ClusterName"}}, MetricNameSelectors: []string{ "apiserver_current_inflight_requests", + "apiserver_current_inqueue_requests", }, }, { @@ -406,6 +407,11 @@ func getControlPlaneMetricDescriptors(conf *confmap.Conf) []awsemfexporter.Metri Unit: "Count", Overwrite: true, }, + { + MetricName: "apiserver_requested_deprecated_apis", + Unit: "Count", + Overwrite: true, + }, { MetricName: "apiserver_storage_objects", Unit: "Count", @@ -421,11 +427,6 @@ func getControlPlaneMetricDescriptors(conf *confmap.Conf) []awsemfexporter.Metri Unit: "Seconds", Overwrite: true, }, - { - MetricName: "apiserver_storage_objects", - Unit: "Count", - Overwrite: true, - }, { MetricName: "apiserver_storage_db_total_size_in_bytes", Unit: "Bytes", @@ -441,11 +442,6 @@ func getControlPlaneMetricDescriptors(conf *confmap.Conf) []awsemfexporter.Metri Unit: "Bytes", Overwrite: true, }, - { - MetricName: "etcd_request_duration_seconds", - Unit: "Seconds", - Overwrite: true, - }, { MetricName: "rest_client_request_duration_seconds", Unit: "Seconds", diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 3029d4b03c..72b0867bbf 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -267,7 +267,7 @@ func TestTranslator(t *testing.T) { { Dimensions: [][]string{{"ClusterName"}, {"ContainerName", "FullPodName", "PodName", "Namespace", "ClusterName"}, {"ContainerName", "PodName", "Namespace", "ClusterName"}}, MetricNameSelectors: 
[]string{ - "container_cpu_utilization", "container_cpu_utilization_over_container_limit", + "container_cpu_utilization", "container_cpu_utilization_over_container_limit", "container_cpu_limit", "container_cpu_request", "container_memory_utilization", "container_memory_utilization_over_container_limit", "container_memory_failures_total", "container_memory_limit", "container_memory_request", "container_filesystem_usage", "container_filesystem_available", "container_filesystem_utilization", "container_status_running", "container_status_terminated", "container_status_waiting", "container_status_waiting_reason_crash_loop_back_off", @@ -294,7 +294,7 @@ func TestTranslator(t *testing.T) { Dimensions: [][]string{{"PodName", "Namespace", "ClusterName"}, {"ClusterName"}, {"FullPodName", "PodName", "Namespace", "ClusterName"}, {"Service", "Namespace", "ClusterName"}}, MetricNameSelectors: []string{"pod_cpu_reserved_capacity", "pod_memory_reserved_capacity", "pod_number_of_container_restarts", "pod_number_of_containers", "pod_number_of_running_containers", "pod_status_ready", "pod_status_scheduled", "pod_status_running", "pod_status_pending", "pod_status_failed", "pod_status_unknown", - "pod_status_succeeded", "pod_memory_request", "pod_memory_limit", + "pod_status_succeeded", "pod_memory_request", "pod_memory_limit", "pod_cpu_limit", "pod_cpu_request", }, }, { @@ -341,7 +341,7 @@ func TestTranslator(t *testing.T) { }, { Dimensions: [][]string{{"ClusterName", "endpoint"}, {"ClusterName"}}, - MetricNameSelectors: []string{"apiserver_storage_size_bytes", "apiserver_storage_db_total_size_in_bytes", "etcd_db_total_size_in_bytes", "etcd_request_duration_seconds"}, + MetricNameSelectors: []string{"apiserver_storage_size_bytes", "apiserver_storage_db_total_size_in_bytes", "etcd_db_total_size_in_bytes"}, }, { Dimensions: [][]string{{"ClusterName", "resource"}, {"ClusterName"}}, @@ -365,7 +365,7 @@ func TestTranslator(t *testing.T) { }, { Dimensions: [][]string{{"ClusterName", 
"request_kind"}, {"ClusterName"}}, - MetricNameSelectors: []string{"apiserver_current_inflight_requests"}, + MetricNameSelectors: []string{"apiserver_current_inflight_requests", "apiserver_current_inqueue_requests"}, }, { Dimensions: [][]string{{"ClusterName", "name"}, {"ClusterName"}}, @@ -441,13 +441,13 @@ func TestTranslator(t *testing.T) { Overwrite: true, }, { - MetricName: "apiserver_storage_objects", + MetricName: "apiserver_requested_deprecated_apis", Unit: "Count", Overwrite: true, }, { - MetricName: "etcd_request_duration_seconds", - Unit: "Seconds", + MetricName: "apiserver_storage_objects", + Unit: "Count", Overwrite: true, }, { @@ -455,11 +455,6 @@ func TestTranslator(t *testing.T) { Unit: "Seconds", Overwrite: true, }, - { - MetricName: "apiserver_storage_objects", - Unit: "Count", - Overwrite: true, - }, { MetricName: "apiserver_storage_db_total_size_in_bytes", Unit: "Bytes", diff --git a/translator/util/ec2util/ec2util.go b/translator/util/ec2util/ec2util.go index b2af3fac74..6b3ce5e37b 100644 --- a/translator/util/ec2util/ec2util.go +++ b/translator/util/ec2util/ec2util.go @@ -5,7 +5,6 @@ package ec2util import ( "fmt" - "log" "net" "sync" "time" @@ -113,7 +112,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { if hostname, err := mdDisableFallback.GetMetadata("hostname"); err == nil { e.Hostname = hostname } else { - log.Printf("D! could not get hostname without imds v1 fallback enable thus enable fallback") + fmt.Println("D! could not get hostname without imds v1 fallback enable thus enable fallback") hostnameInner, errInner := mdEnableFallback.GetMetadata("hostname") if errInner == nil { e.Hostname = hostnameInner @@ -130,7 +129,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { e.PrivateIP = instanceIdentityDocument.PrivateIP e.InstanceID = instanceIdentityDocument.InstanceID } else { - log.Printf("D! could not get instance document without imds v1 fallback enable thus enable fallback") + fmt.Println("D! 
could not get instance document without imds v1 fallback enable thus enable fallback") instanceIdentityDocumentInner, errInner := mdEnableFallback.GetInstanceIdentityDocument() if errInner == nil { e.Region = instanceIdentityDocumentInner.Region From fe5f931f08aee68e3da24895662d7a41608cd5d9 Mon Sep 17 00:00:00 2001 From: Hyunsoo Kim <884273+movence@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:25:25 -0400 Subject: [PATCH 07/55] remove double '$' (#543) --- .github/workflows/test-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index f67e1210e7..370c832990 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -83,7 +83,7 @@ jobs: uses: aws-actions/configure-aws-credentials@v2 with: role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: $${{ inputs.TargetRegion }} + aws-region: ${{ inputs.TargetRegion }} - name: Cache binaries id: cached_binaries From a6ac5b6e30ed95cfdc4b7a3d438543e31f50e7a8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:08:57 -0500 Subject: [PATCH 08/55] Automated sync with upstream - last commit fc6b1c50ec76687bf025b5291944a4291bb11dbd - run #84.1 (#549) Co-authored-by: Seth L <81644108+sethAmazon@users.noreply.github.com> Co-authored-by: Chad Patel Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Github Action Co-authored-by: Jeffrey Chien Co-authored-by: Adam <90734270+adam-mateen@users.noreply.github.com> --- go.mod | 28 +++++----- go.sum | 52 +++++++++---------- .../emf_and_kubernetes_config.yaml | 18 +++---- .../kubernetes_on_prem_config.yaml | 20 +++---- .../logs_and_kubernetes_config.yaml | 20 +++---- .../otel/exporter/awsemf/kubernetes.go | 6 +-- .../otel/exporter/awsemf/translator_test.go | 6 +-- 7 files changed, 75 insertions(+), 75 deletions(-) 
diff --git a/go.mod b/go.mod index 494c82bbb8..55f21c2495 100644 --- a/go.mod +++ b/go.mod @@ -6,34 +6,34 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689 +replace 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086 // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => 
github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => 
github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231010194048-ca00a4d96086 -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086 // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 // indirect @@ -423,4 +423,4 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231005180140-4b74f352a689 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231010194048-ca00a4d96086 diff --git a/go.sum b/go.sum index 7fd27813d0..9fa90aee4e 100644 --- a/go.sum +++ b/go.sum @@ -139,34 +139,34 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231005180140-4b74f352a689 h1:C41hepLgo3qoZ/raJJXwg5+61r1T0wKGe7ouq2GTDCQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter 
v0.0.0-20231005180140-4b74f352a689/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689 h1:PB5QDEQZeC7Wuvmuwd6mlJbjAbU7d1jNe91Ro1sieBY= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231005180140-4b74f352a689/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689 h1:Hs0LHi+VUmWUlTBrc3dp8moDSsIElpbUfWWezE4yPdg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231005180140-4b74f352a689/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689 h1:PE+2GVB1EVsRaqShTpyunefheg82SklTOeLsYIXveus= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231005180140-4b74f352a689/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689 h1:H7WeHG6ERGOgPCS01RjwMo3Faja60PbozpPXDhx3OGI= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231005180140-4b74f352a689/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689 h1:hpv83ENe81wZmLC93U90hT5WzbOzLCY8EhxAmvUjMuo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231005180140-4b74f352a689/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689 h1:MVjE0TuGzjWvWbphb/UQhhKz2+XtRjnhEJF20ht/D30= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231005180140-4b74f352a689/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689 h1:XYgIXvOXzjDUvkw0KQ8HjGmqW2+3fLFOhwtWRZcn5YU= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231005180140-4b74f352a689/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689 h1:UMqzAQZicKnFjs4M7QXXfMq1Tf/RopSD0g1kfZkgRvo= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231005180140-4b74f352a689/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086 h1:TRJjygBgnh9fJryfwmW4mlTY5x+3YLzdbLvRH7fRKKE= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086 h1:K6U7gjV7K3dphSHM58+syX2AJEDl0/L5JV+vLoMNteg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086 h1:U5DkSgPvYr3JPveOKaC2KZYYZLBMQQsU7dBOJOunbco= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil 
v0.0.0-20231010194048-ca00a4d96086 h1:DgFsom+0FLOGRmifUum7Ak5agaUkmoowMsaJV9RBuLw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231010194048-ca00a4d96086/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086 h1:XqNAMwWdeRK3yBSNQ7qtz2m4h7V14u/uDBqFOzcIGGY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086 h1:wacwVdx2g+cgG9rwfnkRpo2livzkh/zNIlJHgS+0eCg= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086 h1:YX51130ub3HUmjT/yQ9LE3jLLPPJo684DBECE4j614w= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086 h1:bOmg1o07RevtPz7r6DTYA30m9BFvgk0TBqa0R+g9AAs= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086 h1:o2kPeydL+WfCJs8jM6NBmvfT2hEhXjTmrodckD4ZxNA= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689 h1:mSioY+8Y75youwy5RUzKmlR5K0SXKcsTsIPReetXyIs= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231005180140-4b74f352a689/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689 h1:vMz2KlUPl8FvCit6GN+K2mYGPjN5DbgU5u9q9o0YL0s= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689 h1:8lwu1RO1grkes/ZD4zQuNLF5FQ9jKlOQQ7vQUss4Buo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689 h1:PkIHZwFh7YGCEHK6Hb81VMb9J8UugN1KI3wMRoLfb2Q= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231005180140-4b74f352a689/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086 h1:xdNB2JVC9WnOdhoLJVSAfa7q2T8qjDTQyJ3+Rnpr87U= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086 h1:eYAppnv9Aib7y27njtMLOQeA02dgOwt/diIo4LNgIgI= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231010194048-ca00a4d96086 h1:3FiCcGI5UjOzvdtM+AByUsaanTl7x+oXEwxiC70Hl0I= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231010194048-ca00a4d96086/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086 h1:nLe+YkTSldClMKWWN14TU+Ix1YBo4aAhezedGTbzjeU= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 7fe1138031..16804d953f 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -60,15 +60,6 @@ exporters: - container_filesystem_usage - container_filesystem_available - container_filesystem_utilization - - 
container_status_running - - container_status_terminated - - container_status_waiting - - container_status_waiting_reason_crash_loop_back_off - - container_status_waiting_reason_image_pull_error - - container_status_waiting_reason_start_error - - container_status_waiting_reason_create_container_error - - container_status_waiting_reason_create_container_config_error - - container_status_terminated_reason_oom_killed # pod metrics - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] label_matchers: [ ] @@ -103,6 +94,15 @@ exporters: - pod_memory_limit - pod_cpu_limit - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] label_matchers: [ ] diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 3cc73c82af..54cdb7fe87 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -31,15 +31,6 @@ exporters: - container_filesystem_usage - container_filesystem_available - container_filesystem_utilization - - container_status_running - - container_status_terminated - - container_status_waiting - - container_status_waiting_reason_crash_loop_back_off - - container_status_waiting_reason_image_pull_error - - container_status_waiting_reason_start_error - - 
container_status_waiting_reason_create_container_error - - container_status_waiting_reason_create_container_config_error - - container_status_terminated_reason_oom_killed # pod metrics - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] label_matchers: [ ] @@ -56,7 +47,7 @@ exporters: - pod_interface_network_rx_dropped - pod_interface_network_tx_dropped - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, Service ] ] - label_matchers: [ ] + label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -74,6 +65,15 @@ exporters: - pod_memory_limit - pod_cpu_limit - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] label_matchers: [ ] diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 5b038168ff..664ab2127c 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -60,15 +60,6 @@ exporters: - container_filesystem_usage - container_filesystem_available - container_filesystem_utilization - - container_status_running - - container_status_terminated - - container_status_waiting - - 
container_status_waiting_reason_crash_loop_back_off - - container_status_waiting_reason_image_pull_error - - container_status_waiting_reason_start_error - - container_status_waiting_reason_create_container_error - - container_status_waiting_reason_create_container_config_error - - container_status_terminated_reason_oom_killed # pod metrics - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] label_matchers: [ ] @@ -85,7 +76,7 @@ exporters: - pod_interface_network_rx_dropped - pod_interface_network_tx_dropped - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, Service ] ] - label_matchers: [ ] + label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -103,6 +94,15 @@ exporters: - pod_memory_limit - pod_cpu_limit - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed # node metrics - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] label_matchers: [ ] diff --git a/translator/translate/otel/exporter/awsemf/kubernetes.go b/translator/translate/otel/exporter/awsemf/kubernetes.go index eb083953ad..76d2f39ea5 100644 --- a/translator/translate/otel/exporter/awsemf/kubernetes.go +++ b/translator/translate/otel/exporter/awsemf/kubernetes.go @@ -63,9 +63,6 @@ func getContainerMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.Metric "container_cpu_utilization", 
"container_cpu_utilization_over_container_limit", "container_cpu_limit", "container_cpu_request", "container_memory_utilization", "container_memory_utilization_over_container_limit", "container_memory_failures_total", "container_memory_limit", "container_memory_request", "container_filesystem_usage", "container_filesystem_available", "container_filesystem_utilization", - "container_status_running", "container_status_terminated", "container_status_waiting", "container_status_waiting_reason_crash_loop_back_off", - "container_status_waiting_reason_image_pull_error", "container_status_waiting_reason_start_error", "container_status_waiting_reason_create_container_error", - "container_status_waiting_reason_create_container_config_error", "container_status_terminated_reason_oom_killed", }, } @@ -95,6 +92,9 @@ func getPodMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclar selectors = append(selectors, []string{"pod_number_of_container_restarts", "pod_number_of_containers", "pod_number_of_running_containers", "pod_status_ready", "pod_status_scheduled", "pod_status_running", "pod_status_pending", "pod_status_failed", "pod_status_unknown", "pod_status_succeeded", "pod_memory_request", "pod_memory_limit", "pod_cpu_limit", "pod_cpu_request", + "pod_container_status_running", "pod_container_status_terminated", "pod_container_status_waiting", "pod_container_status_waiting_reason_crash_loop_back_off", + "pod_container_status_waiting_reason_image_pull_error", "pod_container_status_waiting_reason_start_error", "pod_container_status_waiting_reason_create_container_error", + "pod_container_status_waiting_reason_create_container_config_error", "pod_container_status_terminated_reason_oom_killed", }...) 
} diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 72b0867bbf..8af6695ee7 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -270,9 +270,6 @@ func TestTranslator(t *testing.T) { "container_cpu_utilization", "container_cpu_utilization_over_container_limit", "container_cpu_limit", "container_cpu_request", "container_memory_utilization", "container_memory_utilization_over_container_limit", "container_memory_failures_total", "container_memory_limit", "container_memory_request", "container_filesystem_usage", "container_filesystem_available", "container_filesystem_utilization", - "container_status_running", "container_status_terminated", "container_status_waiting", "container_status_waiting_reason_crash_loop_back_off", - "container_status_waiting_reason_image_pull_error", "container_status_waiting_reason_start_error", "container_status_waiting_reason_create_container_error", - "container_status_waiting_reason_create_container_config_error", "container_status_terminated_reason_oom_killed", }, }, { @@ -295,6 +292,9 @@ func TestTranslator(t *testing.T) { MetricNameSelectors: []string{"pod_cpu_reserved_capacity", "pod_memory_reserved_capacity", "pod_number_of_container_restarts", "pod_number_of_containers", "pod_number_of_running_containers", "pod_status_ready", "pod_status_scheduled", "pod_status_running", "pod_status_pending", "pod_status_failed", "pod_status_unknown", "pod_status_succeeded", "pod_memory_request", "pod_memory_limit", "pod_cpu_limit", "pod_cpu_request", + "pod_container_status_running", "pod_container_status_terminated", "pod_container_status_waiting", "pod_container_status_waiting_reason_crash_loop_back_off", + "pod_container_status_waiting_reason_image_pull_error", "pod_container_status_waiting_reason_start_error", 
"pod_container_status_waiting_reason_create_container_error", + "pod_container_status_waiting_reason_create_container_config_error", "pod_container_status_terminated_reason_oom_killed", }, }, { From 3b3498ff408b78ef0e09ae305b439bfe454a1a52 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 13:05:09 -0400 Subject: [PATCH 09/55] Update OTel fork components to d1a2dc44f2f34893710a29fbdb554840652309ea (#563) Co-authored-by: Github Action --- go.mod | 28 ++++++++++++++-------------- go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index 55f21c2495..a86aa781e0 100644 --- a/go.mod +++ b/go.mod @@ -6,34 +6,34 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3 -replace 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3 // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => 
github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231010194048-ca00a4d96086 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3 // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 // indirect @@ -423,4 +423,4 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231010194048-ca00a4d96086 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231012205229-d1a2dc44f2f3 diff --git a/go.sum b/go.sum index 9fa90aee4e..930f46a297 100644 --- a/go.sum +++ b/go.sum @@ -139,34 +139,34 @@ 
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086 h1:TRJjygBgnh9fJryfwmW4mlTY5x+3YLzdbLvRH7fRKKE= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086 h1:K6U7gjV7K3dphSHM58+syX2AJEDl0/L5JV+vLoMNteg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086 h1:U5DkSgPvYr3JPveOKaC2KZYYZLBMQQsU7dBOJOunbco= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231010194048-ca00a4d96086/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231010194048-ca00a4d96086 h1:DgFsom+0FLOGRmifUum7Ak5agaUkmoowMsaJV9RBuLw= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231010194048-ca00a4d96086/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086 h1:XqNAMwWdeRK3yBSNQ7qtz2m4h7V14u/uDBqFOzcIGGY= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231010194048-ca00a4d96086/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086 h1:wacwVdx2g+cgG9rwfnkRpo2livzkh/zNIlJHgS+0eCg= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231010194048-ca00a4d96086/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086 h1:YX51130ub3HUmjT/yQ9LE3jLLPPJo684DBECE4j614w= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231010194048-ca00a4d96086/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086 h1:bOmg1o07RevtPz7r6DTYA30m9BFvgk0TBqa0R+g9AAs= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231010194048-ca00a4d96086/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086 h1:o2kPeydL+WfCJs8jM6NBmvfT2hEhXjTmrodckD4ZxNA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231010194048-ca00a4d96086/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231012205229-d1a2dc44f2f3 h1:2tLCZtdFZ4B7yfqhtPEWOtVoCpFE0s3zB2IHqkVAVqM= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter 
v0.0.0-20231012205229-d1a2dc44f2f3 h1:J2tk9Eo7FBT+GbQIOd7tq0XQyxtA+O75gHKoGYH5uuE= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3 h1:S1LKmk4Eczr+Ur6ReKMOkvfaYLBcJaTbRbtVmE9s9rY= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3 h1:axMuDAP4C3gfDb0Ox5NGbIyoTKPOZ4Pt268hfH4jNKk= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3 h1:uCQjg1aL9DjKu8BB+9VhBdEQ6PDxz3PACctDYQU0i0I= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3 h1:uswozvbfuT2WbsLDd9Pk9hRrm9fsMHPTLzzbbhFEOi8= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3 h1:brhehmtgpokhtJUaX/Roo/Fg+wp2JootZzj9PyJXc7I= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3 h1:xlCHzoTSd8DOC0aAs1cWsGXlAtUaH0Hzh9yJYzdljgQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3 h1:Igs/mTssnX3WYVpGcj72b9n1GqQdcmWBZmCgAQam8Cg= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086 h1:xdNB2JVC9WnOdhoLJVSAfa7q2T8qjDTQyJ3+Rnpr87U= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231010194048-ca00a4d96086/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086 h1:eYAppnv9Aib7y27njtMLOQeA02dgOwt/diIo4LNgIgI= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231010194048-ca00a4d96086/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231010194048-ca00a4d96086 h1:3FiCcGI5UjOzvdtM+AByUsaanTl7x+oXEwxiC70Hl0I= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver 
v0.0.0-20231010194048-ca00a4d96086/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086 h1:nLe+YkTSldClMKWWN14TU+Ix1YBo4aAhezedGTbzjeU= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231010194048-ca00a4d96086/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3 h1:2veJzdr7Ulzh0PVhIzk8qFlYZ2UOKlh+Th8awDT8fsw= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:4Xsr3S/XBYXDOo3AihM9YUww6YxzZUJYRUpiP8LeWe0= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:cvtX0kTQO/wY+33wWVv3PJS3tLCQ39x+lCfhZgbMiOY= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:aNJxUF5OA5SeBosU9OxJr+L6JY8JEx2APAz+xPbb1GM= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= 
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= From 53ccec9f2c228bd8b2b0032ee8c0b6144bd74ca8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 14:09:55 -0400 Subject: [PATCH 10/55] Automated sync with upstream - last commit a67ac451e380754d08930c0a71696bfa06cdc563 - run #88.1 (#564) Co-authored-by: Seth L <81644108+sethAmazon@users.noreply.github.com> Co-authored-by: Chad Patel Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Github Action Co-authored-by: Jeffrey Chien Co-authored-by: Adam <90734270+adam-mateen@users.noreply.github.com> Co-authored-by: Mitali Salvi <44349099+mitali-salvi@users.noreply.github.com> From 15ee36d198748226c08be51f5fa1ccf633e1392d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 20 Oct 2023 14:59:08 -0400 Subject: [PATCH 11/55] Automated sync with upstream - last commit 8a17844600e57650a785620363439a26ccaaf1b1 - run #96.1 (#583) Co-authored-by: Seth L <81644108+sethAmazon@users.noreply.github.com> Co-authored-by: Chad Patel Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Github Action Co-authored-by: Jeffrey Chien Co-authored-by: Adam <90734270+adam-mateen@users.noreply.github.com> Co-authored-by: Bhanu Bandi Co-authored-by: Yared <45324375+ymtaye@users.noreply.github.com> Co-authored-by: Mitali Salvi <44349099+mitali-salvi@users.noreply.github.com> --- go.mod | 31 +- go.sum | 55 ++-- plugins/inputs/logfile/README.md | 4 +- plugins/inputs/logfile/fileconfig.go | 11 +- plugins/inputs/logfile/fileconfig_test.go | 51 +++- plugins/inputs/logfile/logfile.go | 2 +- plugins/inputs/logfile/logfile_test.go | 67 ++-- plugins/inputs/logfile/tail/tail.go | 5 +- 
plugins/inputs/logfile/tail/tail_test.go | 13 +- plugins/outputs/cloudwatchlogs/pusher.go | 13 +- .../base_container_insights_config.yaml | 6 +- .../emf_and_kubernetes_config.yaml | 4 +- .../kubernetes_on_prem_config.yaml | 4 +- .../sampleConfig/log_ecs_metric_only.yaml | 4 +- .../logs_and_kubernetes_config.conf | 2 +- .../logs_and_kubernetes_config.yaml | 4 +- .../sampleConfig/no_skip_log_timestamp.conf | 37 +++ .../sampleConfig/no_skip_log_timestamp.json | 15 + .../no_skip_log_timestamp_windows.conf | 37 +++ .../no_skip_log_timestamp_windows.json | 16 + .../sampleConfig/prometheus_config_linux.yaml | 4 +- .../prometheus_config_windows.yaml | 4 +- .../sampleConfig/skip_log_timestamp.conf | 35 +++ .../sampleConfig/skip_log_timestamp.json | 18 ++ .../skip_log_timestamp_default.conf | 35 +++ .../skip_log_timestamp_default.json | 15 + .../skip_log_timestamp_default_windows.conf | 35 +++ .../skip_log_timestamp_default_windows.json | 15 + .../skip_log_timestamp_windows.conf | 35 +++ .../skip_log_timestamp_windows.json | 18 ++ translator/tocwconfig/tocwconfig_test.go | 286 +++++++++++++++--- translator/translate/agent/ruleLogFile.go | 1 + .../files/collect_list/collect_list_test.go | 89 ++++-- .../files/collect_list/ruleTimestampFormat.go | 48 ++- .../collect_list/ruleTimestampFormat_test.go | 199 ++++++++++++ .../otel/exporter/awsemf/kubernetes.go | 2 +- .../otel/exporter/awsemf/translator.go | 9 +- .../otel/exporter/awsemf/translator_test.go | 2 +- .../otel_aws_cloudwatch_logs/translator.go | 1 - 39 files changed, 1051 insertions(+), 181 deletions(-) create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp.conf create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp.json create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.conf create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.json create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp.conf create 
mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp.json create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default.conf create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default.json create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.conf create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.json create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.conf create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.json create mode 100644 translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat_test.go diff --git a/go.mod b/go.mod index a86aa781e0..1db4bc99f1 100644 --- a/go.mod +++ b/go.mod @@ -6,34 +6,35 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231012205229-d1a2dc44f2f3 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3 
+replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => 
github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231020163023-8bdf732d320a // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 // indirect @@ -83,7 +84,7 @@ replace github.com/go-kit/kit => github.com/go-kit/kit v0.12.1-0.20220808180842- replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 require ( - github.com/BurntSushi/toml v0.4.1 + github.com/BurntSushi/toml v1.3.2 github.com/Jeffail/gabs v1.4.0 github.com/aws/aws-sdk-go v1.45.2 github.com/aws/aws-sdk-go-v2 v1.19.0 @@ -423,4 +424,4 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231012205229-d1a2dc44f2f3 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231020163023-8bdf732d320a diff --git a/go.sum b/go.sum index 930f46a297..d9b16e920a 100644 --- a/go.sum +++ b/go.sum @@ -94,8 +94,9 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -139,34 +140,34 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter 
v0.0.0-20231012205229-d1a2dc44f2f3 h1:2tLCZtdFZ4B7yfqhtPEWOtVoCpFE0s3zB2IHqkVAVqM= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231012205229-d1a2dc44f2f3 h1:J2tk9Eo7FBT+GbQIOd7tq0XQyxtA+O75gHKoGYH5uuE= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3 h1:S1LKmk4Eczr+Ur6ReKMOkvfaYLBcJaTbRbtVmE9s9rY= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3 h1:axMuDAP4C3gfDb0Ox5NGbIyoTKPOZ4Pt268hfH4jNKk= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3 h1:uCQjg1aL9DjKu8BB+9VhBdEQ6PDxz3PACctDYQU0i0I= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3 h1:uswozvbfuT2WbsLDd9Pk9hRrm9fsMHPTLzzbbhFEOi8= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231012205229-d1a2dc44f2f3/go.mod 
h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3 h1:brhehmtgpokhtJUaX/Roo/Fg+wp2JootZzj9PyJXc7I= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3 h1:xlCHzoTSd8DOC0aAs1cWsGXlAtUaH0Hzh9yJYzdljgQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3 h1:Igs/mTssnX3WYVpGcj72b9n1GqQdcmWBZmCgAQam8Cg= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a h1:E12KX+/EG0lgJLqs6j5egsqr+H3+CB8a/UaMnTVocbE= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a/go.mod h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a h1:Qf01P4PgCCtViETGGhBBtnyMGPpUssyZRV0m4zWyIzA= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a h1:ZrHh+eRCpcRKyQ7eyIf+OMDJ6zNj4cUCUH4HSLo7lVg= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a h1:fzduYrFPdB/U4S2pr0oyo1dyLBwxtLcpnTPLw12ULjA= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a h1:JukQdbg+7mvU0kQ92j/KocsAYZ4x3ei85kEbEdTslPI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a h1:g9MMmqMQXpy3HjJ64PoKtpaTSu61iv3SJNNAXQUmcdg= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a h1:U/fex+AoERTJeFo1IjDgsjjyKshIagmxu9c67GJp0j0= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a h1:RTebkRHNQMiZyvIR5U55deCMgEpH+Nerlhl6eeJ5dgI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws 
v0.0.0-20231020163023-8bdf732d320a h1:ad2h+5tqUTXFSdnJHGxO9UxUDpmr1r2n8Bcw/BYzfrw= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231020163023-8bdf732d320a/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3 h1:2veJzdr7Ulzh0PVhIzk8qFlYZ2UOKlh+Th8awDT8fsw= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:4Xsr3S/XBYXDOo3AihM9YUww6YxzZUJYRUpiP8LeWe0= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:cvtX0kTQO/wY+33wWVv3PJS3tLCQ39x+lCfhZgbMiOY= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3 h1:aNJxUF5OA5SeBosU9OxJr+L6JY8JEx2APAz+xPbb1GM= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231012205229-d1a2dc44f2f3/go.mod 
h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a h1:9xYr+Ru7GzJq9cobLqoV0DOp2XbZ+f6nuxo/wTOi4gQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a h1:ILOMnnQ8bUXg0Q9591LknoPqZJWbefDe3ktQpKv41E8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a h1:Yd5943Zx/1PDXzA9QUQ+duoxEfAqLa7U9fJGBuWK+Ic= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231020163023-8bdf732d320a h1:nI8fWWNGJ+/zsSjdrDC6iFHlFsFp1dCbj/NRZ6s9vUc= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231020163023-8bdf732d320a/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= diff --git a/plugins/inputs/logfile/README.md b/plugins/inputs/logfile/README.md index 3a7adcfb4b..e514ce75c4 100644 --- a/plugins/inputs/logfile/README.md +++ b/plugins/inputs/logfile/README.md @@ -45,7 +45,7 @@ The plugin expects 
messages in one of the log_group_name = "logfile.log" log_stream_name = "" timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" - timestamp_layout = "02 Jan 2006 15:04:05" + timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. @@ -63,7 +63,7 @@ The plugin expects messages in one of the log_group_name = "varlog" log_stream_name = "" timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" - timestamp_layout = "02 Jan 2006 15:04:05" + timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. diff --git a/plugins/inputs/logfile/fileconfig.go b/plugins/inputs/logfile/fileconfig.go index dd383ccd54..c136f9d7dc 100644 --- a/plugins/inputs/logfile/fileconfig.go +++ b/plugins/inputs/logfile/fileconfig.go @@ -43,7 +43,7 @@ type FileConfig struct { //The regex of the timestampFromLogLine presents in the log entry TimestampRegex string `toml:"timestamp_regex"` //The timestampFromLogLine layout used in GoLang to parse the timestampFromLogLine. - TimestampLayout string `toml:"timestamp_layout"` + TimestampLayout []string `toml:"timestamp_layout"` //The time zone used to parse the timestampFromLogLine in the log entry. Timezone string `toml:"timezone"` @@ -179,7 +179,14 @@ func (config *FileConfig) timestampFromLogLine(logValue string) time.Time { replacement := fmt.Sprintf(".%s", fracSecond[:3]) timestampContent = fmt.Sprintf("%s%s%s", timestampContent[:start], replacement, timestampContent[end:]) } - timestamp, err := time.ParseInLocation(config.TimestampLayout, timestampContent, config.TimezoneLoc) + var err error + var timestamp time.Time + for _, timestampLayout := range config.TimestampLayout { + timestamp, err = time.ParseInLocation(timestampLayout, timestampContent, config.TimezoneLoc) + if err == nil { + break + } + } if err != nil { log.Printf("E! 
Error parsing timestampFromLogLine: %s", err) return time.Time{} diff --git a/plugins/inputs/logfile/fileconfig_test.go b/plugins/inputs/logfile/fileconfig_test.go index 8f0b2b96b0..4890adb7c5 100644 --- a/plugins/inputs/logfile/fileconfig_test.go +++ b/plugins/inputs/logfile/fileconfig_test.go @@ -18,7 +18,7 @@ func TestFileConfigInit(t *testing.T) { FilePath: "/tmp/logfile.log", LogGroupName: "logfile.log", TimestampRegex: "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2})", - TimestampLayout: "02 Jan 2006 15:04:05", + TimestampLayout: []string{"02 Jan 2006 15:04:05"}, Timezone: "UTC", MultiLineStartPattern: "{timestamp_regex}", } @@ -47,7 +47,7 @@ func TestFileConfigInitFailureCase(t *testing.T) { FilePath: "/tmp/logfile.log", LogGroupName: "logfile.log", TimestampRegex: "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}+)", - TimestampLayout: "02 Jan 2006 15:04:05", + TimestampLayout: []string{"02 Jan 2006 15:04:05"}, Timezone: "UTC", MultiLineStartPattern: "{timestamp_regex}", } @@ -60,7 +60,7 @@ func TestFileConfigInitFailureCase(t *testing.T) { FilePath: "/tmp/logfile.log", LogGroupName: "logfile.log", TimestampRegex: "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2})", - TimestampLayout: "02 Jan 2006 15:04:05", + TimestampLayout: []string{"02 Jan 2006 15:04:05"}, Timezone: "UTC", MultiLineStartPattern: "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}+)", } @@ -82,7 +82,7 @@ func TestLogGroupName(t *testing.T) { func TestTimestampParser(t *testing.T) { timestampRegex := "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2})" - timestampLayout := "02 Jan 2006 15:04:05" + timestampLayout := []string{"02 Jan 2006 15:04:05"} timezone := "UTC" timezoneLoc := time.UTC timestampRegexP, err := regexp.Compile(timestampRegex) @@ -110,7 +110,7 @@ func TestTimestampParser(t *testing.T) { func TestTimestampParserWithPadding(t *testing.T) { timestampRegex := "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})" - timestampLayout := "1 2 15:04:05" + timestampLayout := []string{"1 2 15:04:05"} timezone := 
"UTC" timezoneLoc := time.UTC timestampRegexP, err := regexp.Compile(timestampRegex) @@ -133,9 +133,48 @@ func TestTimestampParserWithPadding(t *testing.T) { assert.Equal(t, 10, timestamp.Minute(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "10", timestamp.Minute())) } +func TestTimestampParserDefault(t *testing.T) { + // Check when timestamp_format is "%b %d %H:%M:%S" + // %d and %-d are both treated as s{0,1}\\d{1,2} + timestampRegex := "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})" + timestampLayout := []string{"test", "Jan 2 15:04:05"} + timezone := "UTC" + timezoneLoc := time.UTC + timestampRegexP, err := regexp.Compile(timestampRegex) + require.NoError(t, err, fmt.Sprintf("Failed to compile regex %s", timestampRegex)) + fileConfig := &FileConfig{ + TimestampRegex: timestampRegex, + TimestampRegexP: timestampRegexP, + TimestampLayout: timestampLayout, + Timezone: timezone, + TimezoneLoc: timezoneLoc} + + // make sure layout is compatible for "Sep 9", "Sep 9" , "Sep 09", "Sep 09" options + logEntry := fmt.Sprintf("Sep 9 02:00:43 ip-10-4-213-132 \n") + timestamp := fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + + logEntry = fmt.Sprintf("Sep 9 02:00:43 ip-10-4-213-132 \n") + timestamp = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + + logEntry = fmt.Sprintf("Sep 09 02:00:43 ip-10-4-213-132 \n") + timestamp = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + + logEntry = fmt.Sprintf("Sep 09 02:00:43 ip-10-4-213-132 \n") + timestamp = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + +} + func TestTimestampParserWithFracSeconds(t *testing.T) { timestampRegex := "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2},(\\d{1,9}) \\w{3})" - timestampLayout := "02 
Jan 2006 15:04:05,.000 MST" + timestampLayout := []string{"02 Jan 2006 15:04:05,.000 MST"} timezone := "UTC" timezoneLoc := time.UTC timestampRegexP, err := regexp.Compile(timestampRegex) diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index 3dc0578b99..1ca98da36b 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -73,7 +73,7 @@ const sampleConfig = ` log_stream_name = "" publish_multi_logs = false timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" - timestamp_layout = "02 Jan 2006 15:04:05" + timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. diff --git a/plugins/inputs/logfile/logfile_test.go b/plugins/inputs/logfile/logfile_test.go index 4efa63115a..f5a38ab48c 100644 --- a/plugins/inputs/logfile/logfile_test.go +++ b/plugins/inputs/logfile/logfile_test.go @@ -371,11 +371,12 @@ func TestLogsFileRemove(t *testing.T) { tt.Stop() } -func setupLogFileForTest(t *testing.T, file *os.File, prefix string) *LogFile { +func setupLogFileForTest(t *testing.T, monitorPath string) *LogFile { logFile := NewLogFile() logFile.Log = TestLogger{t} + t.Logf("create LogFile with FilePath = %s", monitorPath) logFile.FileConfig = []FileConfig{{ - FilePath: filepath.Join(filepath.Dir(file.Name()), prefix+"*"), + FilePath: monitorPath, FromBeginning: true, AutoRemoval: true, }} @@ -394,8 +395,12 @@ func makeTempFile(t *testing.T, prefix string) *os.File { // getLogSrc returns a LogSrc from the given LogFile, and the channel for output. // Verifies 1 and only 1 LogSrc is discovered. func getLogSrc(t *testing.T, logFile *LogFile) (*logs.LogSrc, chan logs.LogEvent) { + start := time.Now() logSources := logFile.FindLogSrc() - require.Equal(t, 1, len(logSources)) + duration := time.Since(start) + // LogFile.FindLogSrc() should not block. 
+ require.Less(t, duration, time.Millisecond*100) + require.Equal(t, 1, len(logSources), "FindLogSrc() expected 1, got %d", len(logSources)) logSource := logSources[0] evts := make(chan logs.LogEvent) logSource.SetOutput(func(e logs.LogEvent) { @@ -407,48 +412,38 @@ func getLogSrc(t *testing.T, logFile *LogFile) (*logs.LogSrc, chan logs.LogEvent } func writeLines(t *testing.T, file *os.File, numLines int, msg string) { - t.Log("Fill temp file with sufficient lines to be read.") + t.Logf("start writing, %s", file.Name()) for i := 0; i < numLines; i++ { _, err := file.WriteString(msg + "\n") require.NoError(t, err) } + t.Logf("stop writing, %s", file.Name()) } // createWriteRead creates a temp file, writes to it, then verifies events // are received. If isParent is true, then spawn a 2nd goroutine for createWriteRead. -// Close the given channel when complete to let caller know it was successful. +// Closes "done" when complete to let caller know it was successful. func createWriteRead(t *testing.T, prefix string, logFile *LogFile, done chan bool, isParent bool) { // Let caller know when the goroutine is done. defer close(done) // done2 is only passed to child if this is the parent. done2 := make(chan bool) file := makeTempFile(t, prefix) - if isParent { - logFile = setupLogFileForTest(t, file, prefix) - defer logFile.Stop() - } logSrc, evts := getLogSrc(t, logFile) defer (*logSrc).Stop() defer close(evts) // Choose a large enough number of lines so that even high-spec hosts will not // complete receiving logEvents before the 2nd createWriteRead() goroutine begins. - const numLines int = 100000 + const numLines int = 1000000 const msg string = "this is the best log line ever written to a file" writeLines(t, file, numLines, msg) file.Close() - if !isParent { - // Child creates 2nd temp file which is NOT auto removed. 
- defer os.Remove(file.Name()) - } t.Log("Verify every line written to the temp file is received.") for i := 0; i < numLines; i++ { logEvent := <-evts require.Equal(t, msg, logEvent.Message()) - if i != numLines/2 { - continue - } - // Halfway through start another goroutine to create another temp file. - if isParent { + if isParent && i == numLines/2 { + // Halfway through start child goroutine to create another temp file. go createWriteRead(t, prefix, logFile, done2, false) } } @@ -457,8 +452,8 @@ func createWriteRead(t *testing.T, prefix string, logFile *LogFile, done chan bo t.Log("Verify child completed.") select { case <-done2: - t.Log("Completed before timeout (as expected)") - case <-time.After(time.Second * 5): + t.Log("Child completed before timeout (as expected)") + case <-time.After(time.Second * 10): require.Fail(t, "timeout waiting for child") } t.Log("Verify 1st temp file was auto deleted.") @@ -468,21 +463,37 @@ func createWriteRead(t *testing.T, prefix string, logFile *LogFile, done chan bo } // TestLogsFileAutoRemoval verifies when a new file matching the configured -// FilePath is discovered, the old file will be automatically deleted after -// being read to the end-of-file. +// FilePath is discovered, the old file will be automatically deleted ONLY after +// being read to the end-of-file. Also verifies the new log file is discovered +// before finishing the old file. func TestLogsFileAutoRemoval(t *testing.T) { // Override global in tailersrc.go. multilineWaitPeriod = 10 * time.Millisecond - prefix := "file_auto_removal" + prefix := "TestLogsFileAutoRemoval*" + f1 := makeTempFile(t, prefix) + f1.Close() + os.Remove(f1.Name()) + // Create the LogFile. 
+ fileDirectoryPath := filepath.Dir(f1.Name()) + monitorPath := filepath.Join(fileDirectoryPath, prefix) + logFile := setupLogFileForTest(t, monitorPath) + defer logFile.Stop() + done := make(chan bool) - createWriteRead(t, prefix, nil, done, true) + createWriteRead(t, prefix, logFile, done, true) t.Log("Verify 1st tmp file created and discovered.") select { case <-done: - t.Log("Completed before timeout (as expected)") - case <-time.After(time.Second * 5): + t.Log("Parent completed before timeout (as expected)") + case <-time.After(time.Second * 10): require.Fail(t, "timeout waiting for 2nd temp file.") } + // Cleanup + files, _ := filepath.Glob(monitorPath) + for _, f := range files { + t.Logf("cleanup, %s", f) + os.Remove(f) + } } func TestLogsTimestampAsMultilineStarter(t *testing.T) { @@ -504,7 +515,7 @@ append line` FilePath: tmpfile.Name(), FromBeginning: true, TimestampRegex: "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", - TimestampLayout: "15:04:05 06 Jan 2", + TimestampLayout: []string{"15:04:05 06 Jan 2"}, MultiLineStartPattern: "{timestamp_regex}", Timezone: time.UTC.String(), }} diff --git a/plugins/inputs/logfile/tail/tail.go b/plugins/inputs/logfile/tail/tail.go index c52418e854..826f5c6c4b 100644 --- a/plugins/inputs/logfile/tail/tail.go +++ b/plugins/inputs/logfile/tail/tail.go @@ -165,10 +165,9 @@ func (tail *Tail) Stop() error { } // StopAtEOF stops tailing as soon as the end of the file is reached. -// Blocks until tailer is dead and returns reason for death. -func (tail *Tail) StopAtEOF() error { +// Does not wait until tailer is dead. 
+func (tail *Tail) StopAtEOF() { tail.Kill(errStopAtEOF) - return tail.Wait() } var errStopAtEOF = errors.New("tail: stop at eof") diff --git a/plugins/inputs/logfile/tail/tail_test.go b/plugins/inputs/logfile/tail/tail_test.go index cd72b22f3d..691d8b4459 100644 --- a/plugins/inputs/logfile/tail/tail_test.go +++ b/plugins/inputs/logfile/tail/tail_test.go @@ -83,19 +83,20 @@ func TestStopAtEOF(t *testing.T) { readThreelines(t, tail) - // Since StopAtEOF() will block until the EOF is reached, run it in a goroutine. + // Since tail.Wait() will block until the EOF is reached, run it in a goroutine. done := make(chan bool) go func() { tail.StopAtEOF() + tail.Wait() close(done) }() // Verify the goroutine is blocked indefinitely. select { case <-done: - t.Fatalf("StopAtEOF() completed unexpectedly") + t.Fatalf("tail.Wait() completed unexpectedly") case <-time.After(time.Second * 1): - t.Log("timeout waiting for StopAtEOF() (as expected)") + t.Log("timeout waiting for tail.Wait() (as expected)") } assert.Equal(t, errStopAtEOF, tail.Err()) @@ -105,12 +106,12 @@ func TestStopAtEOF(t *testing.T) { <-tail.Lines } - // Verify StopAtEOF() has completed. + // Verify tail.Wait() has completed. 
 	select { case <-done: - t.Log("StopAtEOF() completed (as expected)") + t.Log("tail.Wait() completed (as expected)") case <-time.After(time.Second * 1): - t.Fatalf("StopAtEOF() has not completed") + t.Fatalf("tail.Wait() has not completed") } // Then remove the tmpfile diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 80572880bc..83b8f55282 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -20,8 +20,9 @@ import ( ) const ( - reqSizeLimit = 1024 * 1024 - reqEventsLimit = 10000 + reqSizeLimit = 1024 * 1024 + reqEventsLimit = 10000 + warnOldTimeStamp = 1 * 24 * time.Hour ) var ( @@ -51,6 +52,7 @@ type pusher struct { flushTimer *time.Timer sequenceToken *string lastValidTime int64 + lastUpdateTime time.Time needSort bool stop <-chan struct{} lastSentTime time.Time @@ -413,12 +415,19 @@ func (p *pusher) convertEvent(e logs.LogEvent) *cloudwatchlogs.InputLogEvent { // a valid timestamp and use the last valid timestamp for new entries that does // not have a timestamp. t = p.lastValidTime + if !p.lastUpdateTime.IsZero() { + // Warn if the last valid timestamp is more than warnOldTimeStamp (1 day) old. 
+ if time.Since(p.lastUpdateTime) > warnOldTimeStamp { + p.Log.Warnf("Unable to parse timestamp, using last valid timestamp found in the logs %v: which is at least older than 1 day for log group %v: ", p.lastValidTime, p.Group) + } + } } else { t = time.Now().UnixNano() / 1000000 } } else { t = e.Time().UnixNano() / 1000000 p.lastValidTime = t + p.lastUpdateTime = time.Now() } return &cloudwatchlogs.InputLogEvent{ Message: &message, diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index 7ecd23fe8f..94c18cc7e7 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -1,7 +1,7 @@ connectors: {} exporters: awscloudwatchlogs/emf_logs: - certificate_file_path: "" + certificate_file_path: "/etc/test/ca_bundle.pem" emf_only: true endpoint: "https://fake_endpoint" imds_retries: 1 @@ -39,8 +39,8 @@ exporters: disable_metric_extraction: true version: "0" eks_fargate_container_insights_enabled: false - certificate_file_path: "" - endpoint: "" + certificate_file_path: "/etc/test/ca_bundle.pem" + endpoint: "https://fake_endpoint" enhanced_container_insights: false imds_retries: 1 local_mode: false diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 16804d953f..a1746ec979 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -35,7 +35,7 @@ exporters: disable_metric_extraction: true eks_fargate_container_insights_enabled: false certificate_file_path: "" - endpoint: "" + endpoint: "https://fake_endpoint" enhanced_container_insights: true imds_retries: 2 local_mode: false @@ -70,7 +70,7 @@ exporters: - pod_network_tx_bytes - pod_cpu_utilization_over_pod_limit - 
pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace, Service ], [ ClusterName ] ] + - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: - pod_interface_network_rx_dropped diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 54cdb7fe87..64ed8e36cb 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -6,7 +6,7 @@ exporters: dimension_rollup_option: NoDimensionRollup disable_metric_extraction: true eks_fargate_container_insights_enabled: false - endpoint: "" + endpoint: "https://fake_endpoint" enhanced_container_insights: true imds_retries: 1 local_mode: false @@ -41,7 +41,7 @@ exporters: - pod_network_tx_bytes - pod_cpu_utilization_over_pod_limit - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace, Service ], [ ClusterName ] ] + - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: - pod_interface_network_rx_dropped diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 7025cbffc6..7d944ba2b3 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -35,8 +35,8 @@ exporters: disable_metric_extraction: false eks_fargate_container_insights_enabled: false certificate_file_path: "" - endpoint: "" - 
imds_retries: 1 + endpoint: "https://fake_endpoint" + "imds_retries": 1 enhanced_container_insights: false local_mode: false log_group_name: /aws/ecs/containerinsights/{ClusterName}/performance diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.conf b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.conf index ce241e990c..62db563917 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.conf +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.conf @@ -28,7 +28,7 @@ multi_line_start_pattern = "{timestamp_regex}" pipe = false retention_in_days = -1 - timestamp_layout = "02 Jan 2006 15:04:05" + timestamp_layout = ["_2 Jan 2006 15:04:05"] timestamp_regex = "(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2})" timezone = "UTC" diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 664ab2127c..3eb5ce2d7e 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -35,7 +35,7 @@ exporters: dimension_rollup_option: NoDimensionRollup disable_metric_extraction: false eks_fargate_container_insights_enabled: false - endpoint: "" + endpoint: "https://fake_endpoint" imds_retries: 0 enhanced_container_insights: true local_mode: false @@ -70,7 +70,7 @@ exporters: - pod_network_tx_bytes - pod_cpu_utilization_over_pod_limit - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace, Service ], [ ClusterName ] ] + - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] label_matchers: [ ] metric_name_selectors: - pod_interface_network_rx_dropped diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.conf 
b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.conf new file mode 100644 index 0000000000..18f892386e --- /dev/null +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.conf @@ -0,0 +1,37 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "/opt/aws/amazon-cloudwatch-agent/logs/state" + + [[inputs.logfile.file_config]] + file_path = "/tmp/not-amazon-cloudwatch-agent.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + timestamp_layout = ["15:04:05 06 Jan _2"] + timestamp_regex = "(d{2}:d{2}:d{2} d{2} w{3} s{0,1} d{1,2})" + +[outputs] + + [[outputs.cloudwatchlogs]] + force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.json b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.json new file mode 100644 index 0000000000..4e04deb3de --- /dev/null +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.json @@ -0,0 +1,15 @@ +{ + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/tmp/not-amazon-cloudwatch-agent.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.conf b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.conf new file mode 100644 index 0000000000..70d57a32b2 --- /dev/null +++ 
b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.conf @@ -0,0 +1,37 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\state" + + [[inputs.logfile.file_config]] + file_path = "c:\\tmp\\not-amazon-cloudwatch-agent.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + timestamp_layout = ["15:04:05 06 Jan _2"] + timestamp_regex = "(d{2}:d{2}:d{2} d{2} w{3} s{0,1} d{1,2})" + +[outputs] + + [[outputs.cloudwatchlogs]] + force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.json b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.json new file mode 100644 index 0000000000..fb7bf89559 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.json @@ -0,0 +1,16 @@ +{ + + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "c:\\tmp\\not-amazon-cloudwatch-agent.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 0324968b89..d8464cc07f 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -8,8 +8,8 
@@ exporters: retain_initial_value_of_delta_metric: false eks_fargate_container_insights_enabled: false certificate_file_path: "" - endpoint: "" - imds_retries: 1 + endpoint: "https://fake_endpoint" + "imds_retries": 1 enhanced_container_insights: false local_mode: false log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index ccd84ea2fb..639e1cd466 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -8,8 +8,8 @@ exporters: retain_initial_value_of_delta_metric: false eks_fargate_container_insights_enabled: false certificate_file_path: "" - endpoint: "" - imds_retries: 1 + endpoint: "https://fake_endpoint" + "imds_retries": 1 enhanced_container_insights: false local_mode: false log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.conf b/translator/tocwconfig/sampleConfig/skip_log_timestamp.conf new file mode 100644 index 0000000000..527d132104 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.conf @@ -0,0 +1,35 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "/opt/tmp/a.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "/opt/aws/amazon-cloudwatch-agent/logs/state" + + [[inputs.logfile.file_config]] + file_path = "/opt/tmp/a.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + +[outputs] + + [[outputs.cloudwatchlogs]] + 
force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.json b/translator/tocwconfig/sampleConfig/skip_log_timestamp.json new file mode 100644 index 0000000000..db134e261e --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.json @@ -0,0 +1,18 @@ +{ + "agent": { + "logfile": "/opt/tmp/a.log" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/opt/tmp/a.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.conf b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.conf new file mode 100644 index 0000000000..3cb30744ec --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.conf @@ -0,0 +1,35 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "/opt/aws/amazon-cloudwatch-agent/logs/state" + + [[inputs.logfile.file_config]] + file_path = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + +[outputs] + + [[outputs.cloudwatchlogs]] + force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.json b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.json new file 
mode 100644 index 0000000000..734df33a1b --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.json @@ -0,0 +1,15 @@ +{ + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.conf b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.conf new file mode 100644 index 0000000000..36c2684e47 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.conf @@ -0,0 +1,35 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\state" + + [[inputs.logfile.file_config]] + file_path = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\amazon-cloudwatch-agent.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + +[outputs] + + [[outputs.cloudwatchlogs]] + force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.json b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.json new file mode 100644 index 0000000000..0ba8b224fb --- /dev/null +++ 
b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.json @@ -0,0 +1,15 @@ +{ + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\amazon-cloudwatch-agent.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.conf b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.conf new file mode 100644 index 0000000000..fbd9d27b6a --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.conf @@ -0,0 +1,35 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "c:\\tmp\\am.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + + [[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "c:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\Logs\\state" + + [[inputs.logfile.file_config]] + file_path = "c:\\tmp\\am.log" + from_beginning = true + log_group_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = -1 + +[outputs] + + [[outputs.cloudwatchlogs]] + force_flush_interval = "5s" + log_stream_name = "i-UNKNOWN" + region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.json b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.json new file mode 100644 index 0000000000..66394e40c7 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.json @@ -0,0 +1,18 @@ +{ + "agent": { + "logfile": "c:\\tmp\\am.log" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "c:\\tmp\\am.log", + "log_group_name": 
"amazon-cloudwatch-agent.log", + "timestamp_format": "%H:%M:%S %y %b %d" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index 1c0e010c27..816824aef8 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -47,12 +47,22 @@ const ( //go:embed sampleConfig/prometheus_config.yaml var prometheusConfig string +type testCase struct { + filename string + targetPlatform string + expectedEnvVars map[string]string + appendString string +} + func TestBaseContainerInsightsConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") - expectedEnvVars := map[string]string{} + t.Setenv(envconfig.AWS_CA_BUNDLE, "/etc/test/ca_bundle.pem") + expectedEnvVars := map[string]string{ + "AWS_CA_BUNDLE": "/etc/test/ca_bundle.pem", + } checkTranslation(t, "base_container_insights_config", "linux", expectedEnvVars, "") checkTranslation(t, "base_container_insights_config", "darwin", nil, "") } @@ -101,11 +111,32 @@ func TestWindowsEventOnlyConfig(t *testing.T) { } func TestStatsDConfig(t *testing.T) { - resetContext(t) - expectedEnvVars := map[string]string{} - checkTranslation(t, "statsd_config", "linux", expectedEnvVars, "_linux") - checkTranslation(t, "statsd_config", "windows", expectedEnvVars, "_windows") - checkTranslation(t, "statsd_config", "darwin", nil, "_linux") + testCases := map[string]testCase{ + "linux": { + filename: "statsd_config", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "_linux", + }, + "windows": { + filename: "statsd_config", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "_windows", + }, + "darwin": { + filename: "statsd_config", + targetPlatform: "darwin", + expectedEnvVars: nil, + appendString: "_linux", + }, + } + for name, 
testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } } // Linux only for CollectD @@ -141,11 +172,32 @@ func TestPrometheusConfig(t *testing.T) { } func TestBasicConfig(t *testing.T) { - resetContext(t) - expectedEnvVars := map[string]string{} - checkTranslation(t, "basic_config_linux", "linux", expectedEnvVars, "") - checkTranslation(t, "basic_config_linux", "darwin", nil, "") - checkTranslation(t, "basic_config_windows", "windows", expectedEnvVars, "") + testCases := map[string]testCase{ + "linux": { + filename: "basic_config_linux", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "darwin": { + filename: "basic_config_linux", + targetPlatform: "darwin", + expectedEnvVars: nil, + appendString: "", + }, + "windows": { + filename: "basic_config_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } } func TestInvalidInputConfig(t *testing.T) { @@ -157,20 +209,62 @@ func TestInvalidInputConfig(t *testing.T) { func TestStandardConfig(t *testing.T) { // the way our config translator works is int(0) leaves an empty in the yaml // this will default to 0 on contrib side since int default is 0 for golang - resetContext(t) - t.Setenv(envconfig.IMDS_NUMBER_RETRY, "0") - expectedEnvVars := map[string]string{} - checkTranslation(t, "standard_config_linux", "linux", expectedEnvVars, "") - checkTranslation(t, "standard_config_linux", "darwin", nil, "") - checkTranslation(t, "standard_config_windows", "windows", nil, "") + testCases := map[string]testCase{ + "linux": { + filename: "standard_config_linux", + 
targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "darwin": { + filename: "standard_config_linux", + targetPlatform: "darwin", + expectedEnvVars: nil, + appendString: "", + }, + "windows": { + filename: "standard_config_windows", + targetPlatform: "windows", + expectedEnvVars: nil, + appendString: "", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + t.Setenv(envconfig.IMDS_NUMBER_RETRY, "0") + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } } func TestAdvancedConfig(t *testing.T) { - resetContext(t) - expectedEnvVars := map[string]string{} - checkTranslation(t, "advanced_config_linux", "linux", expectedEnvVars, "") - checkTranslation(t, "advanced_config_darwin", "darwin", nil, "") - checkTranslation(t, "advanced_config_windows", "windows", expectedEnvVars, "") + testCases := map[string]testCase{ + "linux": { + filename: "advanced_config_linux", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "darwin": { + filename: "advanced_config_darwin", + targetPlatform: "darwin", + expectedEnvVars: nil, + appendString: "", + }, + "windows": { + filename: "advanced_config_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } } func TestLogOnlyConfig(t *testing.T) { @@ -179,13 +273,105 @@ func TestLogOnlyConfig(t *testing.T) { checkTranslation(t, "log_only_config_windows", "windows", expectedEnvVars, "") } -func TestTraceConfig(t *testing.T) { +func TestSkipLogTimestampConfig(t *testing.T) { + testCases := map[string]testCase{ + "default_linux": { + filename: "skip_log_timestamp_default", + 
targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "default_darwin": { + filename: "skip_log_timestamp_default", + targetPlatform: "darwin", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "default_windows": { + filename: "skip_log_timestamp_default_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "set_linux": { + filename: "skip_log_timestamp", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "set_darwin": { + filename: "skip_log_timestamp", + targetPlatform: "darwin", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "set_windows": { + filename: "skip_log_timestamp_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "no_skip_linux": { + filename: "no_skip_log_timestamp", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "no_skip_darwin": { + filename: "no_skip_log_timestamp", + targetPlatform: "darwin", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + "no_skip_windows": { + filename: "no_skip_log_timestamp_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } +} + +func TestDoNotSkipLogDefaultTimestampConfig(t *testing.T) { resetContext(t) - readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") expectedEnvVars := map[string]string{} - checkTranslation(t, "trace_config", "linux", expectedEnvVars, "_linux") - checkTranslation(t, "trace_config", "darwin", expectedEnvVars, "_linux") - checkTranslation(t, "trace_config", "windows", expectedEnvVars, "_windows") + checkTranslation(t, 
"log_only_config_windows", "windows", expectedEnvVars, "") +} + +func TestTraceConfig(t *testing.T) { + testCases := map[string]testCase{ + "linux": { + filename: "trace_config", + targetPlatform: "linux", + expectedEnvVars: map[string]string{}, + appendString: "_linux", + }, + "darwin": { + filename: "trace_config", + targetPlatform: "darwin", + expectedEnvVars: map[string]string{}, + appendString: "_linux", + }, + "windows": { + filename: "trace_config", + targetPlatform: "windows", + expectedEnvVars: map[string]string{}, + appendString: "_windows", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) + } } func TestConfigWithEnvironmentVariables(t *testing.T) { @@ -195,17 +381,43 @@ func TestConfigWithEnvironmentVariables(t *testing.T) { } func TestStandardConfigWithCommonConfig(t *testing.T) { - resetContext(t) - readCommonConfig(t, "./sampleConfig/commonConfig/withCredentialsProxySsl.toml") - expectedEnvVars := map[string]string{ - "AWS_CA_BUNDLE": "/etc/test/ca_bundle.pem", - "HTTPS_PROXY": "https://127.0.0.1:3280", - "HTTP_PROXY": "http://127.0.0.1:3280", - "NO_PROXY": "254.1.1.1", + testCases := map[string]testCase{ + "linux": { + filename: "standard_config_linux", + targetPlatform: "linux", + expectedEnvVars: map[string]string{ + "AWS_CA_BUNDLE": "/etc/test/ca_bundle.pem", + "HTTPS_PROXY": "https://127.0.0.1:3280", + "HTTP_PROXY": "http://127.0.0.1:3280", + "NO_PROXY": "254.1.1.1", + }, + appendString: "_with_common_config", + }, + "darwin": { + filename: "standard_config_linux", + targetPlatform: "darwin", + expectedEnvVars: nil, + appendString: "_with_common_config", + }, + "windows": { + filename: "standard_config_windows", + targetPlatform: "windows", + expectedEnvVars: map[string]string{ + "AWS_CA_BUNDLE": 
"/etc/test/ca_bundle.pem", + "HTTPS_PROXY": "https://127.0.0.1:3280", + "HTTP_PROXY": "http://127.0.0.1:3280", + "NO_PROXY": "254.1.1.1", + }, + appendString: "_with_common_config", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + resetContext(t) + readCommonConfig(t, "./sampleConfig/commonConfig/withCredentialsProxySsl.toml") + checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) + }) } - checkTranslation(t, "standard_config_linux", "linux", expectedEnvVars, "_with_common_config") - checkTranslation(t, "standard_config_linux", "darwin", nil, "_with_common_config") - checkTranslation(t, "standard_config_windows", "windows", expectedEnvVars, "_with_common_config") } func TestDeltaNetConfigLinux(t *testing.T) { diff --git a/translator/translate/agent/ruleLogFile.go b/translator/translate/agent/ruleLogFile.go index e2988893a7..1eff380c8a 100644 --- a/translator/translate/agent/ruleLogFile.go +++ b/translator/translate/agent/ruleLogFile.go @@ -19,6 +19,7 @@ type Logfile struct { func (l *Logfile) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { returnKey, returnVal = translator.DefaultCase("logfile", GetDefaultValue(), input) + context.CurrentContext().SetAgentLogFile(returnVal.(string)) return } diff --git a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go index a2ad595f70..297e486efb 100644 --- a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go +++ b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go @@ -78,7 +78,7 @@ func TestTimestampFormat(t *testing.T) { "file_path": "path1", "from_beginning": true, "pipe": false, - "timestamp_layout": "15:04:05 06 Jan 2", + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} 
\\s{0,1}\\d{1,2})", "timezone": "UTC", "retention_in_days": -1, @@ -105,7 +105,7 @@ func TestTimestampFormatAll(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "15:04:05 06 Jan 2", + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", }}, }, @@ -123,7 +123,7 @@ func TestTimestampFormatAll(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "1 2 15:04:05", + "timestamp_layout": []string{"1 _2 15:04:05", "01 _2 15:04:05"}, "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", }}, }, @@ -141,10 +141,46 @@ func TestTimestampFormatAll(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "2 1 15:04:05", + "timestamp_layout": []string{"_2 1 15:04:05", "_2 01 15:04:05"}, "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", }}, }, + { + input: `{ + "collect_list":[ + { + "file_path":"path4", + "timestamp_format": "%b %d %H:%M:%S" + } + ] + }`, + expected: []interface{}{map[string]interface{}{ + "file_path": "path4", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }}, + }, + { + input: `{ + "collect_list":[ + { + "file_path":"path5", + "timestamp_format": "%b %-d %H:%M:%S" + } + ] + }`, + expected: []interface{}{map[string]interface{}{ + "file_path": "path5", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }}, + }, { input: `{ "collect_list":[ @@ -159,7 +195,25 @@ func TestTimestampFormatAll(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "5 2 1 15:04:05", + 
"timestamp_layout": []string{"5 _2 1 15:04:05", "5 _2 01 15:04:05"}, + "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }}, + }, + { + input: `{ + "collect_list":[ + { + "file_path":"path7", + "timestamp_format":"%-S %-d %m %H:%M:%S" + } + ] + }`, + expected: []interface{}{map[string]interface{}{ + "file_path": "path7", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"5 _2 01 15:04:05", "5 _2 1 15:04:05"}, "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", }}, }, @@ -182,8 +236,6 @@ func applyRule1(t *testing.T, buf string) interface{} { return val } -// stdNumMonth // "1" //%-m -// stdDay // "2" //%-d // -hour:-minute:-seconds does not work for golang parser. func TestTimestampFormat_NonZeroPadding(t *testing.T) { f := new(FileConfig) @@ -201,7 +253,7 @@ func TestTimestampFormat_NonZeroPadding(t *testing.T) { assert.Fail(t, e.Error()) } _, val := f.ApplyRule(input) - expectedLayout := "3:4:5 06 1 2" + expectedLayout := []string{"3:4:5 06 1 _2", "3:4:5 06 01 _2"} expectedRegex := "(\\d{1,2}:\\d{1,2}:\\d{1,2} \\d{2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2})" expectVal := []interface{}{map[string]interface{}{ "file_path": "path1", @@ -223,8 +275,7 @@ func TestTimestampFormat_NonZeroPadding(t *testing.T) { match := regex.FindStringSubmatch(sampleLogEntry) assert.NotNil(t, match) assert.Equal(t, 2, len(match)) - - parsedTime, err := time.ParseInLocation(expectedLayout, match[1], time.UTC) + parsedTime, err := time.ParseInLocation(expectedLayout[0], match[1], time.UTC) assert.NoError(t, err) assert.Equal(t, time.Date(2018, 3, 8, 1, 2, 3, 0, time.UTC), parsedTime) } @@ -247,7 +298,7 @@ func TestTimestampFormat_SpecialCharacters(t *testing.T) { assert.Fail(t, e.Error()) } _, val := f.ApplyRule(input) - expectedLayout := "^.*?|[({15:04:05 06 Jan 2})]$" + expectedLayout := []string{"^.*?|[({15:04:05 06 Jan _2})]$"} expectedRegex := 
"(\\^\\.\\*\\?\\|\\[\\(\\{\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2}\\}\\)\\]\\$)" expectVal := []interface{}{map[string]interface{}{ "file_path": "path1", @@ -266,7 +317,7 @@ func TestTimestampFormat_SpecialCharacters(t *testing.T) { assert.NotNil(t, match) assert.Equal(t, 2, len(match)) - parsedTime, err := time.ParseInLocation(expectedLayout, match[1], time.UTC) + parsedTime, err := time.ParseInLocation(expectedLayout[0], match[1], time.UTC) assert.NoError(t, err) assert.Equal(t, time.Date(2017, 12, 27, 12, 52, 0, 0, time.UTC), parsedTime) } @@ -286,7 +337,7 @@ func TestTimestampFormat_Template(t *testing.T) { assert.Fail(t, e.Error()) } _, val := f.ApplyRule(input) - expectedLayout := "Jan 2 15:04:05" + expectedLayout := []string{"Jan _2 15:04:05"} expectedRegex := "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})" expectVal := []interface{}{map[string]interface{}{ "file_path": "path1", @@ -304,14 +355,14 @@ func TestTimestampFormat_Template(t *testing.T) { assert.NotNil(t, match) assert.Equal(t, 2, len(match)) - parsedTime, err := time.ParseInLocation(expectedLayout, match[1], time.Local) + parsedTime, err := time.ParseInLocation(expectedLayout[0], match[1], time.Local) assert.NoError(t, err) assert.Equal(t, time.Date(0, 8, 9, 20, 45, 51, 0, time.Local), parsedTime) } func TestTimestampFormat_InvalidRegex(t *testing.T) { translator.ResetMessages() - r := new(TimestampRegax) + r := new(TimestampRegex) var input interface{} e := json.Unmarshal([]byte(`{ "timestamp_format":"%Y-%m-%dT%H:%M%S+00:00" @@ -347,8 +398,8 @@ func TestMultiLineStartPattern(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "15:04:05 06 Jan 02", - "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\d{2})", + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, + "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", "timezone": "UTC", "multi_line_start_pattern": "{timestamp_regex}", }} @@ -377,8 +428,8 @@ 
func TestEncoding(t *testing.T) { "from_beginning": true, "pipe": false, "retention_in_days": -1, - "timestamp_layout": "15:04:05 06 Jan 02", - "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\d{2})", + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, + "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", "timezone": "UTC", "encoding": "gbk", }} diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go index 65af348631..34690e88c9 100644 --- a/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/context" ) /* @@ -61,8 +62,8 @@ var TimeFormatMap = map[string]string{ "%m": "01", "%A": "Monday", "%a": "Mon", - "%-d": "2", - "%d": "02", + "%-d": "_2", + "%d": "_2", "%H": "15", "%-I": "3", "%I": "03", @@ -82,11 +83,11 @@ var TimeFormatRexMap = map[string]string{ "%B": "\\w{7}", "%b": "\\w{3}", "%-m": "\\s{0,1}\\d{1,2}", - "%m": "\\d{2}", + "%m": "\\s{0,1}\\d{1,2}", "%A": "\\w{6,9}", "%a": "\\w{3}", "%-d": "\\s{0,1}\\d{1,2}", - "%d": "\\d{2}", + "%d": "\\s{0,1}\\d{1,2}", "%H": "\\d{2}", "%-I": "\\d{1,2}", "%I": "\\d{2}", @@ -135,23 +136,27 @@ func checkAndReplace(input string, timestampFormatMap map[string]string) string return res } -type TimestampRegax struct { +type TimestampRegex struct { } -func (t *TimestampRegax) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { +// ApplyRule add timestamp regex +// do not add timestamp check when viewing cwa logfile +func (t *TimestampRegex) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { //Convert the input string into []rune and iterate the map and build the output []rune 
m := input.(map[string]interface{}) //If user not specify the timestamp_format, then no config entry for "timestamp_layout" in TOML if val, ok := m["timestamp_format"]; !ok { - returnKey = "" - returnVal = "" + return "", "" + } else if m["file_path"] == context.CurrentContext().GetAgentLogFile() { + fmt.Printf("timestamp_format set file_path : %s is the same as agent log file %s thus do not use timestamp_regex \n", m["file_path"], context.CurrentContext().GetAgentLogFile()) + return "", "" } else { //If user provide with the specific timestamp_format, use the one that user provide res := checkAndReplace(val.(string), TimeFormatRegexEscapeMap) res = checkAndReplace(res, TimeFormatRexMap) // remove the prefix, if the format startswith "%-m" or "%-d", there is an "\\s{0,1}" at the beginning. // like "timestamp_format": "%-m %-d %H:%M:%S" will be converted into following layout and regex - // timestamp_layout = "1 2 15:04:05" + // timestamp_layout = ["1 _2 15:04:05"] // timestamp_regex = "(\\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})" // following timestamp string " 2 1 07:10:06" matches the regex, but it can not match the layout. // After the prefix "\\s{0,1}", it can match both the regex and layout. 
@@ -170,18 +175,35 @@ func (t *TimestampRegax) ApplyRule(input interface{}) (returnKey string, returnV type TimestampLayout struct { } +// ApplyRule add timestamp layout +// do not add timestamp check when viewing cwa logfile func (t *TimestampLayout) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { //Convert the input string into []rune and iterate the map and build the output []rune m := input.(map[string]interface{}) //If user not specify the timestamp_format, then no config entry for "timestamp_layout" in TOML if val, ok := m["timestamp_format"]; !ok { - returnKey = "" - returnVal = "" + return "", "" + } else if m["file_path"] == context.CurrentContext().GetAgentLogFile() { + fmt.Printf("timestamp_format set file_path : %s is the same as agent log file %s thus do not use timestamp_layout \n", m["file_path"], context.CurrentContext().GetAgentLogFile()) + return "", "" } else { res := checkAndReplace(val.(string), TimeFormatMap) //If user provide with the specific timestamp_format, use the one that user provide returnKey = "timestamp_layout" - returnVal = res + timestampInput := val.(string) + // Go doesn't support _2 option for month in day as a result need to set + // timestamp_layout with 2 strings which support %m and %-m + if strings.Contains(timestampInput, "%m") { + timestampInput := strings.Replace(timestampInput, "%m", "%-m", -1) + alternativeLayout := checkAndReplace(timestampInput, TimeFormatMap) + returnVal = []string{res, alternativeLayout} + } else if strings.Contains(timestampInput, "%-m") { + timestampInput = strings.Replace(timestampInput, "%-m", "%m", -1) + alternativeLayout := checkAndReplace(timestampInput, TimeFormatMap) + returnVal = []string{res, alternativeLayout} + } else { + returnVal = []string{res} + } } return } @@ -207,7 +229,7 @@ func (t *Timezone) ApplyRule(input interface{}) (returnKey string, returnVal int } func init() { t1 := new(TimestampLayout) - t2 := new(TimestampRegax) + t2 := new(TimestampRegex) 
t3 := new(Timezone) r := []Rule{t1, t2, t3} RegisterRule("timestamp_format", r) diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat_test.go b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat_test.go new file mode 100644 index 0000000000..d94c54f33a --- /dev/null +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat_test.go @@ -0,0 +1,199 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package collect_list + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTimestampRegexRule(t *testing.T) { + regex := new(TimestampRegex) + type want struct { + key string + value interface{} + } + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithNonZeroPaddedOptions": { + input: map[string]interface{}{ + "timestamp_format": "%-m %-d %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithZeroPaddedOptions": { + input: map[string]interface{}{ + "timestamp_format": "%m %d %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithZeroPaddedMonthWord": { + input: map[string]interface{}{ + "timestamp_format": "%b %d %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithNonZeroPaddedMonthWord": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithYearAsTwoDigits": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %y %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\w{3} \\s{0,1}\\d{1,2} \\d{2} 
\\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithYearAsFourDigits": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %Y %H:%M:%S", + }, + want: &want{ + key: "timestamp_regex", + value: "(\\w{3} \\s{0,1}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2})", + }, + }, + "WithNoTimestampFormat": { + input: map[string]interface{}{ + "timestamp": "foo", + }, + want: &want{ + key: "", + value: "", + }, + }, + "WithInvalidTimestampFormat": { + input: map[string]interface{}{ + "timestamp_format": "foo", + }, + want: &want{ + key: "timestamp_regex", + value: "(foo)", + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + res, returnVal := regex.ApplyRule(testCase.input) + require.NotNil(t, res) + assert.Equal(t, res, testCase.want.key) + assert.Equal(t, returnVal, testCase.want.value) + }) + } +} + +func TestTimestampLayoutxRule(t *testing.T) { + layout := new(TimestampLayout) + type want struct { + key string + value interface{} + } + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithNonZeroPaddedOptions": { + input: map[string]interface{}{ + "timestamp_format": "%-m %-d %H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"1 _2 15:04:05", "01 _2 15:04:05"}, + }, + }, + "WithZeroPaddedOptions": { + input: map[string]interface{}{ + "timestamp_format": "%m %d %H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"01 _2 15:04:05", "1 _2 15:04:05"}, + }, + }, + "WithZeroPaddedMonthWord": { + input: map[string]interface{}{ + "timestamp_format": "%b %d %H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"Jan _2 15:04:05"}, + }, + }, + "WithNonZeroPaddedMonthWord": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"Jan _2 15:04:05"}, + }, + }, + "WithYearAsTwoDigits": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %y 
%H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"Jan _2 06 15:04:05"}, + }, + }, + "WithYearAsFourDigits": { + input: map[string]interface{}{ + "timestamp_format": "%b %-d %Y %H:%M:%S", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"Jan _2 2006 15:04:05"}, + }, + }, + "WithNoTimestampFormat": { + input: map[string]interface{}{ + "timestamp": "foo", + }, + want: &want{ + key: "", + value: "", + }, + }, + "WithInvalidTimestampFormat": { + input: map[string]interface{}{ + "timestamp_format": "foo", + }, + want: &want{ + key: "timestamp_layout", + value: []string{"foo"}, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + res, returnVal := layout.ApplyRule(testCase.input) + require.NotNil(t, res) + assert.Equal(t, res, testCase.want.key) + assert.Equal(t, returnVal, testCase.want.value) + }) + } +} diff --git a/translator/translate/otel/exporter/awsemf/kubernetes.go b/translator/translate/otel/exporter/awsemf/kubernetes.go index 76d2f39ea5..564b0c2c84 100644 --- a/translator/translate/otel/exporter/awsemf/kubernetes.go +++ b/translator/translate/otel/exporter/awsemf/kubernetes.go @@ -111,7 +111,7 @@ func getPodMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclar Dimensions: [][]string{ {"FullPodName", "PodName", "Namespace", "ClusterName"}, {"PodName", "Namespace", "ClusterName"}, - {"Service", "Namespace", "ClusterName"}, + {"Namespace", "ClusterName"}, {"ClusterName"}, }, MetricNameSelectors: []string{"pod_interface_network_rx_dropped", "pod_interface_network_tx_dropped"}, diff --git a/translator/translate/otel/exporter/awsemf/translator.go b/translator/translate/otel/exporter/awsemf/translator.go index 0eee896963..b33881ed70 100644 --- a/translator/translate/otel/exporter/awsemf/translator.go +++ b/translator/translate/otel/exporter/awsemf/translator.go @@ -6,6 +6,7 @@ package awsemf import ( _ "embed" "fmt" + "os" 
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" "go.opentelemetry.io/collector/component" @@ -13,6 +14,7 @@ import ( "go.opentelemetry.io/collector/exporter" "gopkg.in/yaml.v3" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -33,6 +35,7 @@ var ( kubernetesBasePathKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.KubernetesKey) prometheusBasePathKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.PrometheusKey) emfProcessorBasePathKey = common.ConfigKey(prometheusBasePathKey, common.EMFProcessorKey) + endpointOverrideKey = common.ConfigKey(common.LogsKey, common.EndpointOverrideKey) ) type translator struct { @@ -79,6 +82,11 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { } } cfg.AWSSessionSettings.Region = agent.Global_Config.Region + if c.IsSet(endpointOverrideKey) { + cfg.AWSSessionSettings.Endpoint, _ = common.GetString(c, endpointOverrideKey) + } + cfg.AWSSessionSettings.CertificateFilePath = os.Getenv(envconfig.AWS_CA_BUNDLE) + cfg.AWSSessionSettings.Region = agent.Global_Config.Region if profileKey, ok := agent.Global_Config.Credentials[agent.Profile_Key]; ok { cfg.AWSSessionSettings.Profile = fmt.Sprintf("%v", profileKey) } @@ -100,7 +108,6 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { return nil, err } } - return cfg, nil } diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 8af6695ee7..fb76195e0c 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -282,7 +282,7 @@ func TestTranslator(t *testing.T) { Dimensions: [][]string{ 
{"FullPodName", "PodName", "Namespace", "ClusterName"}, {"PodName", "Namespace", "ClusterName"}, - {"Service", "Namespace", "ClusterName"}, + {"Namespace", "ClusterName"}, {"ClusterName"}, }, MetricNameSelectors: []string{"pod_interface_network_rx_dropped", "pod_interface_network_tx_dropped"}, diff --git a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go index ec4ee1c190..09cf02b61f 100644 --- a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go +++ b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go @@ -89,7 +89,6 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { if c.IsSet(endpointOverrideKey) { cfg.AWSSessionSettings.Endpoint, _ = common.GetString(c, endpointOverrideKey) } - cfg.AWSSessionSettings.CertificateFilePath = os.Getenv(envconfig.AWS_CA_BUNDLE) cfg.AWSSessionSettings.IMDSRetries = retryer.GetDefaultRetryNumber() return cfg, nil From 69ee92f262f8d14070e60e6cb11422bbafebde7c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 10:07:48 -0400 Subject: [PATCH 12/55] Update OTel fork components to 505e23230a5051d019d7b7c04f5f4e245b41fbf6 (#604) Co-authored-by: Github Action --- go.mod | 41 ++++++++++++++-------------- go.sum | 85 +++++++++++++++++++++++++++++++++------------------------- 2 files changed, 69 insertions(+), 57 deletions(-) diff --git a/go.mod b/go.mod index 1db4bc99f1..8dca448a5d 100644 --- a/go.mod +++ b/go.mod @@ -7,34 +7,34 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a +replace 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231102130031-505e23230a50 // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231102130031-505e23230a50 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => 
github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231102130031-505e23230a50 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231102130031-505e23230a50 -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231020163023-8bdf732d320a +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => 
github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231102130031-505e23230a50 // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 // indirect @@ -86,8 +86,8 @@ replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api require ( github.com/BurntSushi/toml v1.3.2 github.com/Jeffail/gabs v1.4.0 - github.com/aws/aws-sdk-go v1.45.2 - github.com/aws/aws-sdk-go-v2 v1.19.0 + github.com/aws/aws-sdk-go v1.45.24 + github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.25 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 // indirect github.com/aws/aws-sdk-go-v2/service/autoscaling v1.28.10 @@ -96,7 +96,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ecs v1.28.1 github.com/aws/aws-sdk-go-v2/service/efs v1.19.7 github.com/aws/aws-sdk-go-v2/service/eks v1.27.15 - github.com/aws/smithy-go v1.13.5 + github.com/aws/smithy-go v1.15.0 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 github.com/go-kit/log v0.2.1 github.com/gobwas/glob v0.2.3 @@ -182,6 +182,7 @@ require ( github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/participle/v2 v2.0.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023152757-c6e2437e6590 // indirect github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20230928170322-0df38c533713 // indirect github.com/antchfx/jsonquery v1.1.5 // indirect github.com/antchfx/xmlquery v1.3.9 // indirect @@ -191,10 +192,10 @@ require ( github.com/apache/thrift v0.16.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.1.35 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect @@ -424,4 +425,4 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231020163023-8bdf732d320a +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231102130031-505e23230a50 diff --git a/go.sum b/go.sum index d9b16e920a..912ce07b84 100644 --- a/go.sum +++ b/go.sum @@ -140,34 +140,36 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a h1:E12KX+/EG0lgJLqs6j5egsqr+H3+CB8a/UaMnTVocbE= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231020163023-8bdf732d320a/go.mod 
h1:WgmC0gq7urueR/VbZ0EHZhe3MXV6oWbaMmEWhHvagfg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a h1:Qf01P4PgCCtViETGGhBBtnyMGPpUssyZRV0m4zWyIzA= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231020163023-8bdf732d320a/go.mod h1:b8pL6t9Xqk/zv0nLZsMiniuugDWiWQZRu9kh9t5SBLk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a h1:ZrHh+eRCpcRKyQ7eyIf+OMDJ6zNj4cUCUH4HSLo7lVg= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231020163023-8bdf732d320a/go.mod h1:K9h+mkX+BsA1UTuuheGJjo44KAahxaNu9jJ8/xVF6jo= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a h1:fzduYrFPdB/U4S2pr0oyo1dyLBwxtLcpnTPLw12ULjA= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231020163023-8bdf732d320a/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a h1:JukQdbg+7mvU0kQ92j/KocsAYZ4x3ei85kEbEdTslPI= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231020163023-8bdf732d320a/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a h1:g9MMmqMQXpy3HjJ64PoKtpaTSu61iv3SJNNAXQUmcdg= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231020163023-8bdf732d320a/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a h1:U/fex+AoERTJeFo1IjDgsjjyKshIagmxu9c67GJp0j0= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231020163023-8bdf732d320a/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a h1:RTebkRHNQMiZyvIR5U55deCMgEpH+Nerlhl6eeJ5dgI= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231020163023-8bdf732d320a/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231020163023-8bdf732d320a h1:ad2h+5tqUTXFSdnJHGxO9UxUDpmr1r2n8Bcw/BYzfrw= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231020163023-8bdf732d320a/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231102130031-505e23230a50 h1:MFm/DA3NTQQ3LjON7cj3a53VBwCjRFzBy6UCjfkJxKc= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:/8w8sPrpOeADRJgMsu8o4jOiFX29zCC899+ao7S1GXI= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50 h1:Qzmqql2XCdgK3m65CK41hMAopfFNEbJpDUjs9xX5aSk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:UAXcRSojI8I0Kb9iS9a2v7J/iPrQ1loJIsBprSaVdFo= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50 h1:TTRentWNAvzfYCKHclu08pPWgTiBTuxLHXmBjeN1w/M= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:cr4dmBlfnMVYT+gyKUAKh39zQu5u/UAukxQj15MdZ18= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware 
v0.0.0-20231023152757-c6e2437e6590 h1:uUCPnX2C5C36iyX46N1eUjmp4LRpSTGeexgUWmohv7c= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023152757-c6e2437e6590/go.mod h1:uOQa5/9Jle9VADEdWCXL4AbJr35NJQil30tapcTHQlw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231102130031-505e23230a50 h1:qpH231iNaHowAP+sLku3pF9Rnw6lNC+8lKDQl6UpS0k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231102130031-505e23230a50/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231102130031-505e23230a50 h1:POrjHtvpsWELsdWNq3HBZfvN2Z9pvFMVV0L4T5TJiVM= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231102130031-505e23230a50/go.mod h1:ZwAqtlNaHJX0IUU5O40j96TDbsPA0K7o+m49AZgei7g= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231102130031-505e23230a50 h1:OojhFhavWdxvWUn2VwQriIBVXCmAU5R+n5S1dD3/AaY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231102130031-505e23230a50/go.mod h1:U0J/v82xC95JvG5QhXlrHH9OpgV8scQSGS6N7XW2y/4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231102130031-505e23230a50 h1:UL1ouNkUmHyTCFWkL9nV2pp7J9DFzJMqqJfDWJ8diPY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231102130031-505e23230a50/go.mod h1:58ZN2DUrqxJLqoXu+GZfL0RwMYiRZAAI+COKp0OmA0k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231102130031-505e23230a50 h1:9pjFfcacizieSw26H7S5bCBlnRBRorFWtAW/ESJ4H5I= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231102130031-505e23230a50/go.mod h1:8edNN/XfefbHuGLiDhFdBN1QfJfgH7wmq5ms2Gme1EA= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231102130031-505e23230a50 h1:RnTZCZljG89RQkX3qHp8QnYW0dN3fvzS5/Ma234xLp8= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231102130031-505e23230a50/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a h1:9xYr+Ru7GzJq9cobLqoV0DOp2XbZ+f6nuxo/wTOi4gQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231020163023-8bdf732d320a/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a h1:ILOMnnQ8bUXg0Q9591LknoPqZJWbefDe3ktQpKv41E8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231020163023-8bdf732d320a/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a h1:Yd5943Zx/1PDXzA9QUQ+duoxEfAqLa7U9fJGBuWK+Ic= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231020163023-8bdf732d320a/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231020163023-8bdf732d320a h1:nI8fWWNGJ+/zsSjdrDC6iFHlFsFp1dCbj/NRZ6s9vUc= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver 
v0.0.0-20231020163023-8bdf732d320a/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231102130031-505e23230a50 h1:vLAUJwQUtH2OQ9QkkehyxXI//WalYbNgKU2nqb48LR8= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231102130031-505e23230a50/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231102130031-505e23230a50 h1:09XDd1Ta4n7nLlDUBASj6PoO5j/VwkRWQfvrvFNcoW0= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231102130031-505e23230a50/go.mod h1:t/v7BcGrHUQ0/Lb/4egp0Xe8PrTceEkZVArTuRjQGBo= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231102130031-505e23230a50 h1:3oWzbiA0uwws3DPS6pg5pye/0PDkmWR+qUihyvDYVkA= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231102130031-505e23230a50/go.mod h1:akbVXOWuMWKSgqA1QKoXkm3hFt0qIvDeUr7m3ODAiS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231102130031-505e23230a50 h1:XRMxGoIsq7FySiJff33EcxnSJgOTaWrJtyzhB1Uw5XE= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231102130031-505e23230a50/go.mod h1:fw4J+Pn19ZgfR5ZVxWVtlvKq7+zEfXXlZV/7G9IWkko= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= @@ -198,16 +200,18 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= 
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.45.2 h1:hTong9YUklQKqzrGk3WnKABReb5R8GjbG4Y6dEQfjnk= -github.com/aws/aws-sdk-go v1.45.2/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo= +github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.17.5/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.19.0 h1:klAT+y3pGFBU/qVf1uzwttpBbiuozJYWzNLHioyDJ+k= github.com/aws/aws-sdk-go-v2 v1.19.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.2.0 h1:scBthy70MB3m4LCMFaBcmYCyR2XWOz6MxSfdSu/+fQo= +github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= +github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= +github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/config v1.18.25 h1:JuYyZcnMPBiFqn87L2cRppo+rNwgah6YwD3VuyvaW6Q= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= @@ -221,16 +225,19 @@ 
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXe github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.29/go.mod h1:Dip3sIGv485+xerzVv24emnjX5Sg88utCL8fwGmCeWg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 h1:hMUCiE3Zi5AHrRNGf5j985u0WyqI6r2NULhUfo0N/No= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35/go.mod h1:ipR5PvpSPqIqL5Mi82BxLnfMkHVbmco8kUwO2xrCi0M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.23/go.mod h1:mr6c4cHC+S/MMkrjtSlG4QA36kOznDep+0fga5L/fGQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 h1:yOpYx+FTBdpk/g+sBU6Cb1H0U/TLEcYYp66mYqsPpcc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29/go.mod h1:M/eUABlDbw2uVrdAn+UsI6M727qp2fxkp8K0ejcBDUY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 h1:gGLG7yKaXG02/jBlg210R7VgQIotiQntNhsCFejawx8= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= 
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.28.10 h1:moHEk4wbdc8VNvff4UOLuXVHtjh7YtsGdiyB0MrPPKg= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.28.10/go.mod h1:P3qp1VYVoxHgDhpDDCTre1ee9IKpmgqnUoOb+8RA9qI= @@ -247,14 +254,16 @@ github.com/aws/aws-sdk-go-v2/service/efs v1.19.7 h1:BmyhflgczNmmuAPFhAhMQuLc9zSH github.com/aws/aws-sdk-go-v2/service/efs v1.19.7/go.mod h1:ENSgtHyPiYyBcTAi26Hpr8Xp636IB18qr0D5Ho8EQWA= github.com/aws/aws-sdk-go-v2/service/eks v1.27.15 h1:Q48ivwZJ136hfkk8Dua1fMM7m1e1s/0rBRyRX/J9XAY= github.com/aws/aws-sdk-go-v2/service/eks v1.27.15/go.mod h1:9mqDBj08MtFxKFQWUEMm4iFnIdM9gFpnSJvHUEIfsiU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.8.0 h1:wS94St7YDmLhrPJw3mjJfCfHHOABS3G9c//mDZRzELU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.6.0 h1:q/O6wGx7MFwWfRNgTIVmGgXGBz9UKv16eSX1uuWdM7A= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 h1:0iKliEXAcCa2qVtRs7Ot5hItA2MsufrphbRFlz1Owxo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod 
h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0 h1:wqLvwC4qdrrGikudu8Z9X2sb79BYUYWAgMF5BGFQJY8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 h1:UBQjaMTCKwyUYwiVnUt6toEJwGXsLBI6al083tpjJzY= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= @@ -265,8 +274,10 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= +github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/telegraf v0.10.2-0.20220502160831-c20ebe67c5ef h1:O53nKbZm2XpdudUywNdqbohwUxje9k4vE0xRXWeIVbE= github.com/aws/telegraf v0.10.2-0.20220502160831-c20ebe67c5ef/go.mod h1:6maU8S0L0iMSa0ZvH5b2W7dBX1xjK0D5ONAqe7WTqXc= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20230915153624-7629361f8380 h1:LyWVxYjlmdI9ruL66nvr85SRmUA7sScaTNEAHgbsEHc= From 8abbc2d3ff4acb3f7a56e0577a8d629452e3ee85 Mon Sep 
17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 11:46:20 -0400 Subject: [PATCH 13/55] Automated sync with upstream - last commit cff340a26865bcf2d0214d343dd32ccd670cde88 - run #102.1 (#605) --- RELEASE_NOTES | 47 + cfg/aws/credentials.go | 4 +- cfg/envconfig/envconfig.go | 42 + cfg/envconfig/envconfig_test.go | 33 + .../amazon-cloudwatch-agent.go | 17 +- cmd/config-downloader/downloader.go | 2 +- extension/agenthealth/config.go | 17 + extension/agenthealth/config_test.go | 47 + extension/agenthealth/extension.go | 37 + extension/agenthealth/extension_test.go | 32 + extension/agenthealth/factory.go | 39 + extension/agenthealth/factory_test.go | 28 + .../agenthealth/handler/stats/agent/agent.go | 114 +++ .../handler/stats/agent/agent_test.go | 114 +++ .../handler/stats/client/client.go | 124 +++ .../handler/stats/client/client_test.go | 67 ++ .../agenthealth/handler/stats/handler.go | 79 ++ .../agenthealth/handler/stats/handler_test.go | 73 ++ .../handler/stats/provider/flag.go | 115 +++ .../handler/stats/provider/flag_test.go | 37 + .../handler/stats/provider/interval.go | 60 ++ .../handler/stats/provider/interval_test.go | 30 + .../handler/stats/provider/process.go | 101 ++ .../handler/stats/provider/process_test.go | 69 ++ .../agenthealth/handler/useragent/handler.go | 67 ++ .../handler/useragent/handler_test.go | 32 + .../handler/useragent/useragent.go | 191 ++++ .../handler/useragent/useragent_test.go | 145 +++ extension/agenthealth/testdata/config.yaml | 8 + go.mod | 15 +- go.sum | 6 +- handlers/agentinfo/info.go | 41 +- handlers/agentinfo/info_test.go | 16 +- internal/version/version.go | 58 ++ internal/version/version_test.go | 43 + plugins/inputs/logfile/logfile_test.go | 2 +- plugins/outputs/cloudwatch/cloudwatch.go | 18 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 46 +- plugins/outputs/cloudwatch/config.go | 6 +- plugins/outputs/cloudwatch/factory.go | 13 +- 
.../outputs/cloudwatchlogs/cloudwatchlogs.go | 43 +- plugins/outputs/cloudwatchlogs/pusher.go | 7 +- plugins/outputs/cloudwatchlogs/pusher_test.go | 4 +- .../ec2tagger/ec2metadataprovider.go | 8 +- service/defaultcomponents/components.go | 5 +- service/defaultcomponents/components_test.go | 3 +- translator/config/envconst.go | 26 +- translator/context/context.go | 10 + .../sampleConfig/advanced_config_darwin.yaml | 204 ++-- .../sampleConfig/advanced_config_linux.yaml | 42 +- .../sampleConfig/advanced_config_windows.yaml | 54 +- .../base_container_insights_config.yaml | 493 ++++----- .../sampleConfig/basic_config_linux.yaml | 24 +- .../sampleConfig/basic_config_windows.yaml | 26 +- .../sampleConfig/collectd_config_linux.yaml | 18 +- .../sampleConfig/complete_darwin_config.conf | 2 + .../sampleConfig/complete_darwin_config.yaml | 73 +- .../sampleConfig/complete_linux_config.conf | 2 + .../sampleConfig/complete_linux_config.yaml | 93 +- .../sampleConfig/complete_windows_config.conf | 2 + .../sampleConfig/complete_windows_config.yaml | 39 +- .../sampleConfig/config_with_env.yaml | 209 ++-- .../sampleConfig/delta_config_linux.yaml | 120 ++- .../sampleConfig/delta_net_config_linux.yaml | 88 +- .../sampleConfig/drop_origin_linux.yaml | 36 +- .../emf_and_kubernetes_config.yaml | 959 ++++++++++-------- .../ignore_append_dimensions.yaml | 116 ++- .../sampleConfig/invalid_input_linux.yaml | 22 +- .../kubernetes_on_prem_config.yaml | 805 ++++++++------- .../sampleConfig/log_ecs_metric_only.yaml | 406 ++++---- .../logs_and_kubernetes_config.yaml | 956 +++++++++-------- .../sampleConfig/prometheus_config_linux.yaml | 231 ++--- .../prometheus_config_windows.yaml | 158 +-- .../sampleConfig/standard_config_linux.yaml | 26 +- ...ndard_config_linux_with_common_config.yaml | 28 +- .../sampleConfig/standard_config_windows.yaml | 40 +- ...ard_config_windows_with_common_config.yaml | 42 +- .../sampleConfig/statsd_config_linux.yaml | 21 +- .../sampleConfig/statsd_config_windows.yaml | 18 
+- .../sampleConfig/trace_config_linux.yaml | 196 ++-- .../sampleConfig/trace_config_windows.yaml | 196 ++-- translator/tocwconfig/tocwconfig_test.go | 13 +- translator/tocwconfig/tocwconfig_unix_test.go | 10 +- .../tocwconfig/tocwconfig_windows_test.go | 8 +- translator/translate/agent/agent.go | 3 + translator/translate/agent/ruleRegion.go | 9 +- translator/translate/logs/logs_test.go | 16 + .../translate/logs/ruleBasicLogConfig.go | 3 + .../otel/exporter/awscloudwatch/translator.go | 35 +- .../exporter/awscloudwatch/translator_test.go | 29 +- .../otel/exporter/awsemf/translator.go | 3 + .../otel/exporter/awsemf/translator_test.go | 32 +- .../exporter/awsxray/testdata/config.yaml | 3 +- .../otel/exporter/awsxray/translator.go | 2 + .../otel/exporter/awsxray/translator_test.go | 1 + .../otel_aws_cloudwatch_logs/translator.go | 2 + .../translator_test.go | 11 +- .../otel/extension/agenthealth/translator.go | 62 ++ .../extension/agenthealth/translator_test.go | 75 ++ .../pipeline/containerinsights/translator.go | 3 + .../containerinsights/translator_test.go | 5 + .../otel/pipeline/emf_logs/translator.go | 2 + .../otel/pipeline/emf_logs/translator_test.go | 6 + .../otel/pipeline/host/translator.go | 2 + .../otel/pipeline/host/translator_test.go | 6 + .../otel/pipeline/prometheus/translator.go | 4 +- .../pipeline/prometheus/translator_test.go | 2 + .../otel/pipeline/xray/translator.go | 2 + .../otel/pipeline/xray/translator_test.go | 5 + translator/util/ec2util/ec2util.go | 6 +- translator/util/sdkutil.go | 9 +- 111 files changed, 5565 insertions(+), 2690 deletions(-) create mode 100644 cfg/envconfig/envconfig_test.go create mode 100644 extension/agenthealth/config.go create mode 100644 extension/agenthealth/config_test.go create mode 100644 extension/agenthealth/extension.go create mode 100644 extension/agenthealth/extension_test.go create mode 100644 extension/agenthealth/factory.go create mode 100644 extension/agenthealth/factory_test.go create mode 100644 
extension/agenthealth/handler/stats/agent/agent.go create mode 100644 extension/agenthealth/handler/stats/agent/agent_test.go create mode 100644 extension/agenthealth/handler/stats/client/client.go create mode 100644 extension/agenthealth/handler/stats/client/client_test.go create mode 100644 extension/agenthealth/handler/stats/handler.go create mode 100644 extension/agenthealth/handler/stats/handler_test.go create mode 100644 extension/agenthealth/handler/stats/provider/flag.go create mode 100644 extension/agenthealth/handler/stats/provider/flag_test.go create mode 100644 extension/agenthealth/handler/stats/provider/interval.go create mode 100644 extension/agenthealth/handler/stats/provider/interval_test.go create mode 100644 extension/agenthealth/handler/stats/provider/process.go create mode 100644 extension/agenthealth/handler/stats/provider/process_test.go create mode 100644 extension/agenthealth/handler/useragent/handler.go create mode 100644 extension/agenthealth/handler/useragent/handler_test.go create mode 100644 extension/agenthealth/handler/useragent/useragent.go create mode 100644 extension/agenthealth/handler/useragent/useragent_test.go create mode 100644 extension/agenthealth/testdata/config.yaml create mode 100644 internal/version/version.go create mode 100644 internal/version/version_test.go create mode 100644 translator/translate/otel/extension/agenthealth/translator.go create mode 100644 translator/translate/otel/extension/agenthealth/translator_test.go diff --git a/RELEASE_NOTES b/RELEASE_NOTES index a1ae5fff1f..1015fc01c0 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,50 @@ +======================================================================== +Amazon CloudWatch Agent 1.300030.2 (2023-11-2) +======================================================================== + +Bug fixes: +* Fix enhanced container insights publishing 0 for cpu/memory over metrics when limit/resource not set + 
+======================================================================== +Amazon CloudWatch Agent 1.300030.1 (2023-10-30) +======================================================================== + +Bug fixes: +* Add units to StatusRunning and StatusContainerTerminatedReasonOOMKilled metrics + +Enhancements: +* Add agenthealth extension with user agent handler +* Enable enhanced container insights flag in EMF exporter + +======================================================================== +Amazon CloudWatch Agent 1.300030.0 (2023-10-24) +======================================================================== + +Bug fixes: +* Fix EMF log corruption caused by highly concurrent messages sent to the agent +* CA Bundle fix for AWS Secret regions +* Filter out terminated pods when calculating container insights node request metrics to match kubectl implementation +* Fix auto_removal to not block on the file being removed + +Enhancements: +* Support both zero padded and non-padded month/day when using %m/%d for the logfile timestamp_format +* Use default path when -otelconfig option not given +* Restrict pprof-addr to localhost + +======================================================================== +Amazon CloudWatch Agent 1.300028.4 (2023-10-4) +======================================================================== + +Bug fixes: +* Use GZIP 6 RPM Compression To Fix Install On AL2 Without Kernel Patch + +======================================================================== +Amazon CloudWatch Agent 1.300028.3 (2023-10-3) +======================================================================== + +Bug fixes: +* Use fmt.Printf Instead Of log.Printf On Start Up Because Of A Bug With Windows User Data Causing Agent Not To Start + ======================================================================== Amazon CloudWatch Agent 1.300028.1 (2023-09-18) ======================================================================== diff --git a/cfg/aws/credentials.go 
b/cfg/aws/credentials.go index 34ab40f54a..34ca454904 100644 --- a/cfg/aws/credentials.go +++ b/cfg/aws/credentials.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" ) const ( @@ -116,7 +116,7 @@ func getSession(config *aws.Config) *session.Session { if len(found) > 0 { log.Printf("W! Unused shared config file(s) found: %v. If you would like to use them, "+ "please update your common-config.toml.", found) - agentinfo.RecordSharedConfigFallback() + provider.GetFlagsStats().SetFlag(provider.FlagSharedConfigFallback) } } return ses diff --git a/cfg/envconfig/envconfig.go b/cfg/envconfig/envconfig.go index 8ad3850113..67e85a8021 100644 --- a/cfg/envconfig/envconfig.go +++ b/cfg/envconfig/envconfig.go @@ -3,6 +3,12 @@ package envconfig +import ( + "os" + "strconv" + "sync" +) + const ( //the following are the names of environment variables HTTP_PROXY = "HTTP_PROXY" @@ -14,4 +20,40 @@ const ( CWAGENT_LOG_LEVEL = "CWAGENT_LOG_LEVEL" CWAGENT_USAGE_DATA = "CWAGENT_USAGE_DATA" IMDS_NUMBER_RETRY = "IMDS_NUMBER_RETRY" + RunInContainer = "RUN_IN_CONTAINER" + RunInAWS = "RUN_IN_AWS" + RunWithIRSA = "RUN_WITH_IRSA" + UseDefaultConfig = "USE_DEFAULT_CONFIG" + HostName = "HOST_NAME" + PodName = "POD_NAME" + HostIP = "HOST_IP" + CWConfigContent = "CW_CONFIG_CONTENT" +) + +const ( + // TrueValue is the expected string set on an environment variable to indicate true. 
+ TrueValue = "True" ) + +var ( + usageDataEnabled bool + onceUsageData sync.Once +) + +// getUsageDataEnabled returns true for true or invalid +// examples of invalid are not set env var, "", "invalid" +func getUsageDataEnabled() bool { + ok, err := strconv.ParseBool(os.Getenv(CWAGENT_USAGE_DATA)) + return ok || err != nil +} + +func IsUsageDataEnabled() bool { + onceUsageData.Do(func() { + usageDataEnabled = getUsageDataEnabled() + }) + return usageDataEnabled +} + +func IsRunningInContainer() bool { + return os.Getenv(RunInContainer) == TrueValue +} diff --git a/cfg/envconfig/envconfig_test.go b/cfg/envconfig/envconfig_test.go new file mode 100644 index 0000000000..a69b4bf74b --- /dev/null +++ b/cfg/envconfig/envconfig_test.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package envconfig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsUsageDataEnabled(t *testing.T) { + assert.True(t, getUsageDataEnabled()) + + t.Setenv(CWAGENT_USAGE_DATA, "TRUE") + assert.True(t, getUsageDataEnabled()) + + t.Setenv(CWAGENT_USAGE_DATA, "INVALID") + assert.True(t, getUsageDataEnabled()) + + t.Setenv(CWAGENT_USAGE_DATA, "FALSE") + assert.False(t, getUsageDataEnabled()) +} + +func TestIsRunningInContainer(t *testing.T) { + assert.False(t, IsRunningInContainer()) + + t.Setenv(RunInContainer, "TRUE") + assert.False(t, IsRunningInContainer()) + + t.Setenv(RunInContainer, TrueValue) + assert.True(t, IsRunningInContainer()) +} diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index 0cae50afe0..7062e891b7 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -34,7 +34,8 @@ import ( configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" 
"github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent/internal" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" + "github.com/aws/amazon-cloudwatch-agent/internal/version" "github.com/aws/amazon-cloudwatch-agent/logs" _ "github.com/aws/amazon-cloudwatch-agent/plugins" "github.com/aws/amazon-cloudwatch-agent/profiler" @@ -258,7 +259,7 @@ func runAgent(ctx context.Context, logger.SetupLogging(logConfig) - log.Printf("I! Starting AmazonCloudWatchAgent %s\n", agentinfo.FullVersion()) + log.Printf("I! Starting AmazonCloudWatchAgent %s\n", version.Full()) // Need to set SDK log level before plugins get loaded. // Some aws.Config objects get created early and live forever which means // we cannot change the sdk log level without restarting the Agent. @@ -304,7 +305,7 @@ func runAgent(ctx context.Context, // So just start Telegraf. _, err = os.Stat(*fOtelConfig) if errors.Is(err, os.ErrNotExist) { - agentinfo.SetComponents(&otelcol.Config{}, c) + useragent.Get().SetComponents(&otelcol.Config{}, c) return ag.Run(ctx) } } @@ -328,7 +329,7 @@ func runAgent(ctx context.Context, return err } - agentinfo.SetComponents(cfg, c) + useragent.Get().SetComponents(cfg, c) params := getCollectorParams(factories, provider) @@ -351,7 +352,7 @@ func getCollectorParams(factories otelcol.Factories, provider otelcol.ConfigProv BuildInfo: component.BuildInfo{ Command: "CWAgent", Description: "CloudWatch Agent", - Version: agentinfo.Version(), + Version: version.Number(), }, } return params @@ -453,7 +454,7 @@ func main() { if len(args) > 0 { switch args[0] { case "version": - fmt.Println(agentinfo.FullVersion()) + fmt.Println(version.Full()) return case "config": config.PrintSampleConfig( @@ -492,7 +493,7 @@ func main() { } return case *fVersion: - fmt.Println(agentinfo.FullVersion()) + fmt.Println(version.Full()) return case *fSampleConfig: config.PrintSampleConfig( @@ -637,7 +638,7 
@@ func validateAgentFinalConfigAndPlugins(c *config.Config) error { if *fSchemaTest { //up to this point, the given config file must be valid - fmt.Println(agentinfo.FullVersion()) + fmt.Println(version.Full()) fmt.Printf("The given config: %v is valid\n", *fTomlConfig) os.Exit(0) } diff --git a/cmd/config-downloader/downloader.go b/cmd/config-downloader/downloader.go index 477173104f..fc198b3db1 100644 --- a/cmd/config-downloader/downloader.go +++ b/cmd/config-downloader/downloader.go @@ -143,7 +143,7 @@ func main() { mode = sdkutil.DetectAgentMode(mode) - region = util.DetectRegion(mode, cc.CredentialsMap()) + region, _ = util.DetectRegion(mode, cc.CredentialsMap()) if region == "" && downloadLocation != locationDefault { fmt.Println("Unable to determine aws-region.") diff --git a/extension/agenthealth/config.go b/extension/agenthealth/config.go new file mode 100644 index 0000000000..dd1f94c06c --- /dev/null +++ b/extension/agenthealth/config.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "go.opentelemetry.io/collector/component" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +type Config struct { + IsUsageDataEnabled bool `mapstructure:"is_usage_data_enabled"` + Stats agent.StatsConfig `mapstructure:"stats"` +} + +var _ component.Config = (*Config)(nil) diff --git a/extension/agenthealth/config_test.go b/extension/agenthealth/config_test.go new file mode 100644 index 0000000000..d1bb85b8f8 --- /dev/null +++ b/extension/agenthealth/config_test.go @@ -0,0 +1,47 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +func TestLoadConfig(t *testing.T) { + testCases := []struct { + id component.ID + want component.Config + }{ + { + id: component.NewID(TypeStr), + want: NewFactory().CreateDefaultConfig(), + }, + { + id: component.NewIDWithName(TypeStr, "1"), + want: &Config{IsUsageDataEnabled: false, Stats: agent.StatsConfig{Operations: []string{agent.AllowAllOperations}}}, + }, + { + id: component.NewIDWithName(TypeStr, "2"), + want: &Config{IsUsageDataEnabled: true, Stats: agent.StatsConfig{Operations: []string{"ListBuckets"}}}, + }, + } + for _, testCase := range testCases { + conf, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + cfg := NewFactory().CreateDefaultConfig() + sub, err := conf.Sub(testCase.id.String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, testCase.want, cfg) + } +} diff --git a/extension/agenthealth/extension.go b/extension/agenthealth/extension.go new file mode 100644 index 0000000000..14ab08eb57 --- /dev/null +++ b/extension/agenthealth/extension.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" +) + +type agentHealth struct { + logger *zap.Logger + cfg *Config + component.StartFunc + component.ShutdownFunc +} + +var _ awsmiddleware.Extension = (*agentHealth)(nil) + +func (ah *agentHealth) Handlers() ([]awsmiddleware.RequestHandler, []awsmiddleware.ResponseHandler) { + var responseHandlers []awsmiddleware.ResponseHandler + requestHandlers := []awsmiddleware.RequestHandler{useragent.NewHandler(ah.cfg.IsUsageDataEnabled)} + if ah.cfg.IsUsageDataEnabled { + req, res := stats.NewHandlers(ah.logger, ah.cfg.Stats) + requestHandlers = append(requestHandlers, req...) + responseHandlers = append(responseHandlers, res...) + } + return requestHandlers, responseHandlers +} + +func NewAgentHealth(logger *zap.Logger, cfg *Config) awsmiddleware.Extension { + return &agentHealth{logger: logger, cfg: cfg} +} diff --git a/extension/agenthealth/extension_test.go b/extension/agenthealth/extension_test.go new file mode 100644 index 0000000000..504dc8c50e --- /dev/null +++ b/extension/agenthealth/extension_test.go @@ -0,0 +1,32 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.uber.org/zap" +) + +func TestExtension(t *testing.T) { + ctx := context.Background() + cfg := &Config{IsUsageDataEnabled: true} + extension := NewAgentHealth(zap.NewNop(), cfg) + assert.NotNil(t, extension) + assert.NoError(t, extension.Start(ctx, componenttest.NewNopHost())) + requestHandlers, responseHandlers := extension.Handlers() + // user agent, client stats, stats + assert.Len(t, requestHandlers, 3) + // client stats + assert.Len(t, responseHandlers, 1) + cfg.IsUsageDataEnabled = false + requestHandlers, responseHandlers = extension.Handlers() + // user agent + assert.Len(t, requestHandlers, 1) + assert.Len(t, responseHandlers, 0) + assert.NoError(t, extension.Shutdown(ctx)) +} diff --git a/extension/agenthealth/factory.go b/extension/agenthealth/factory.go new file mode 100644 index 0000000000..fe60efec6f --- /dev/null +++ b/extension/agenthealth/factory.go @@ -0,0 +1,39 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +const ( + TypeStr = "agenthealth" +) + +func NewFactory() extension.Factory { + return extension.NewFactory( + TypeStr, + createDefaultConfig, + createExtension, + component.StabilityLevelAlpha, + ) +} + +func createDefaultConfig() component.Config { + return &Config{ + IsUsageDataEnabled: true, + Stats: agent.StatsConfig{ + Operations: []string{agent.AllowAllOperations}, + }, + } +} + +func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { + return NewAgentHealth(settings.Logger, cfg.(*Config)), nil +} diff --git a/extension/agenthealth/factory_test.go b/extension/agenthealth/factory_test.go new file mode 100644 index 0000000000..4899dfb425 --- /dev/null +++ b/extension/agenthealth/factory_test.go @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension/extensiontest" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := NewFactory().CreateDefaultConfig() + assert.Equal(t, &Config{IsUsageDataEnabled: true, Stats: agent.StatsConfig{Operations: []string{agent.AllowAllOperations}}}, cfg) + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateExtension(t *testing.T) { + cfg := &Config{} + got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, got) +} diff --git a/extension/agenthealth/handler/stats/agent/agent.go b/extension/agenthealth/handler/stats/agent/agent.go new file mode 100644 index 0000000000..6eaf97d4bd --- /dev/null +++ b/extension/agenthealth/handler/stats/agent/agent.go @@ -0,0 +1,114 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agent + +import ( + "encoding/json" + "strings" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" +) + +const ( + AllowAllOperations = "*" +) + +type Stats struct { + CpuPercent *float64 `json:"cpu,omitempty"` + MemoryBytes *uint64 `json:"mem,omitempty"` + FileDescriptorCount *int32 `json:"fd,omitempty"` + ThreadCount *int32 `json:"th,omitempty"` + LatencyMillis *int64 `json:"lat,omitempty"` + PayloadBytes *int `json:"load,omitempty"` + StatusCode *int `json:"code,omitempty"` + SharedConfigFallback *int `json:"scfb,omitempty"` + ImdsFallbackSucceed *int `json:"ifs,omitempty"` + AppSignals *int `json:"as,omitempty"` + EnhancedContainerInsights *int `json:"eci,omitempty"` + RunningInContainer *int `json:"ric,omitempty"` + RegionType *string `json:"rt,omitempty"` + Mode *string `json:"m,omitempty"` +} + +// Merge the other Stats into the current. If the field is not nil, +// then it'll overwrite the existing one. +func (s *Stats) Merge(other Stats) { + if other.CpuPercent != nil { + s.CpuPercent = other.CpuPercent + } + if other.MemoryBytes != nil { + s.MemoryBytes = other.MemoryBytes + } + if other.FileDescriptorCount != nil { + s.FileDescriptorCount = other.FileDescriptorCount + } + if other.ThreadCount != nil { + s.ThreadCount = other.ThreadCount + } + if other.LatencyMillis != nil { + s.LatencyMillis = other.LatencyMillis + } + if other.PayloadBytes != nil { + s.PayloadBytes = other.PayloadBytes + } + if other.StatusCode != nil { + s.StatusCode = other.StatusCode + } + if other.SharedConfigFallback != nil { + s.SharedConfigFallback = other.SharedConfigFallback + } + if other.ImdsFallbackSucceed != nil { + s.ImdsFallbackSucceed = other.ImdsFallbackSucceed + } + if other.AppSignals != nil { + s.AppSignals = other.AppSignals + } + if other.EnhancedContainerInsights != nil { + s.EnhancedContainerInsights = other.EnhancedContainerInsights + } + if other.RunningInContainer != nil { + s.RunningInContainer = 
other.RunningInContainer + } + if other.RegionType != nil { + s.RegionType = other.RegionType + } + if other.Mode != nil { + s.Mode = other.Mode + } +} + +func (s *Stats) Marshal() (string, error) { + raw, err := json.Marshal(s) + if err != nil { + return "", err + } + content := strings.TrimPrefix(string(raw), "{") + return strings.TrimSuffix(content, "}"), nil +} + +type StatsProvider interface { + Stats(operation string) Stats +} + +type OperationsFilter struct { + operations collections.Set[string] + allowAll bool +} + +func (of OperationsFilter) IsAllowed(operationName string) bool { + return of.allowAll || of.operations.Contains(operationName) +} + +func NewOperationsFilter(operations ...string) OperationsFilter { + allowed := collections.NewSet[string](operations...) + return OperationsFilter{ + operations: allowed, + allowAll: allowed.Contains(AllowAllOperations), + } +} + +type StatsConfig struct { + // Operations are the allowed operation names to gather stats for. + Operations []string `mapstructure:"operations,omitempty"` +} diff --git a/extension/agenthealth/handler/stats/agent/agent_test.go b/extension/agenthealth/handler/stats/agent/agent_test.go new file mode 100644 index 0000000000..c379facc19 --- /dev/null +++ b/extension/agenthealth/handler/stats/agent/agent_test.go @@ -0,0 +1,114 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agent + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +func TestMerge(t *testing.T) { + stats := &Stats{CpuPercent: aws.Float64(1.2)} + assert.EqualValues(t, 1.2, *stats.CpuPercent) + assert.Nil(t, stats.MemoryBytes) + stats.Merge(Stats{ + CpuPercent: aws.Float64(1.3), + MemoryBytes: aws.Uint64(123), + }) + assert.EqualValues(t, 1.3, *stats.CpuPercent) + assert.EqualValues(t, 123, *stats.MemoryBytes) + stats.Merge(Stats{ + CpuPercent: aws.Float64(1.5), + MemoryBytes: aws.Uint64(133), + FileDescriptorCount: aws.Int32(456), + ThreadCount: aws.Int32(789), + LatencyMillis: aws.Int64(1234), + PayloadBytes: aws.Int(5678), + StatusCode: aws.Int(200), + SharedConfigFallback: aws.Int(1), + ImdsFallbackSucceed: aws.Int(1), + AppSignals: aws.Int(1), + EnhancedContainerInsights: aws.Int(1), + RunningInContainer: aws.Int(0), + RegionType: aws.String("RegionType"), + Mode: aws.String("Mode"), + }) + assert.EqualValues(t, 1.5, *stats.CpuPercent) + assert.EqualValues(t, 133, *stats.MemoryBytes) + assert.EqualValues(t, 456, *stats.FileDescriptorCount) + assert.EqualValues(t, 789, *stats.ThreadCount) + assert.EqualValues(t, 1234, *stats.LatencyMillis) + assert.EqualValues(t, 5678, *stats.PayloadBytes) + assert.EqualValues(t, 200, *stats.StatusCode) + assert.EqualValues(t, 1, *stats.ImdsFallbackSucceed) + assert.EqualValues(t, 1, *stats.SharedConfigFallback) + assert.EqualValues(t, 1, *stats.AppSignals) + assert.EqualValues(t, 1, *stats.EnhancedContainerInsights) + assert.EqualValues(t, 0, *stats.RunningInContainer) + assert.EqualValues(t, "RegionType", *stats.RegionType) + assert.EqualValues(t, "Mode", *stats.Mode) +} + +func TestMarshal(t *testing.T) { + testCases := map[string]struct { + stats *Stats + want string + }{ + "WithEmpty": { + stats: &Stats{}, + want: "", + }, + "WithPartial": { + stats: &Stats{ + CpuPercent: aws.Float64(1.2), + MemoryBytes: aws.Uint64(123), + 
ThreadCount: aws.Int32(789), + PayloadBytes: aws.Int(5678), + }, + want: `"cpu":1.2,"mem":123,"th":789,"load":5678`, + }, + "WithFull": { + stats: &Stats{ + CpuPercent: aws.Float64(1.2), + MemoryBytes: aws.Uint64(123), + FileDescriptorCount: aws.Int32(456), + ThreadCount: aws.Int32(789), + LatencyMillis: aws.Int64(1234), + PayloadBytes: aws.Int(5678), + StatusCode: aws.Int(200), + ImdsFallbackSucceed: aws.Int(1), + }, + want: `"cpu":1.2,"mem":123,"fd":456,"th":789,"lat":1234,"load":5678,"code":200,"ifs":1`, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + got, err := testCase.stats.Marshal() + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } +} + +func TestOperationFilter(t *testing.T) { + testCases := map[string]struct { + allowedOperations []string + testOperations []string + want []bool + }{ + "WithNoneAllowed": {allowedOperations: nil, testOperations: []string{"nothing", "is", "allowed"}, want: []bool{false, false, false}}, + "WithSomeAllowed": {allowedOperations: []string{"are"}, testOperations: []string{"some", "are", "allowed"}, want: []bool{false, true, false}}, + "WithAllAllowed": {allowedOperations: []string{"*"}, testOperations: []string{"all", "are", "allowed"}, want: []bool{true, true, true}}, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + filter := NewOperationsFilter(testCase.allowedOperations...) + for index, testOperation := range testCase.testOperations { + assert.Equal(t, testCase.want[index], filter.IsAllowed(testOperation)) + } + }) + } +} diff --git a/extension/agenthealth/handler/stats/client/client.go b/extension/agenthealth/handler/stats/client/client.go new file mode 100644 index 0000000000..188ef6b207 --- /dev/null +++ b/extension/agenthealth/handler/stats/client/client.go @@ -0,0 +1,124 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package client + +import ( + "context" + "io" + "net/http" + "sync" + "time" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "github.com/aws/aws-sdk-go/aws" + "github.com/jellydator/ttlcache/v3" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +const ( + handlerID = "cloudwatchagent.ClientStats" + ttlDuration = 10 * time.Second + cacheSize = 1000 +) + +type Stats interface { + awsmiddleware.RequestHandler + awsmiddleware.ResponseHandler + agent.StatsProvider +} + +type requestRecorder struct { + start time.Time + payloadBytes int64 +} + +type clientStatsHandler struct { + filter agent.OperationsFilter + getOperationName func(ctx context.Context) string + getRequestID func(ctx context.Context) string + + statsByOperation sync.Map + requestCache *ttlcache.Cache[string, *requestRecorder] +} + +var _ Stats = (*clientStatsHandler)(nil) + +func NewHandler(filter agent.OperationsFilter) Stats { + requestCache := ttlcache.New[string, *requestRecorder]( + ttlcache.WithTTL[string, *requestRecorder](ttlDuration), + ttlcache.WithCapacity[string, *requestRecorder](cacheSize), + ttlcache.WithDisableTouchOnHit[string, *requestRecorder](), + ) + go requestCache.Start() + return &clientStatsHandler{ + filter: filter, + getOperationName: awsmiddleware.GetOperationName, + getRequestID: awsmiddleware.GetRequestID, + requestCache: requestCache, + } +} + +func (csh *clientStatsHandler) ID() string { + return handlerID +} + +func (csh *clientStatsHandler) Position() awsmiddleware.HandlerPosition { + return awsmiddleware.After +} + +func (csh *clientStatsHandler) HandleRequest(ctx context.Context, r *http.Request) { + operation := csh.getOperationName(ctx) + if !csh.filter.IsAllowed(operation) { + return + } + requestID := csh.getRequestID(ctx) + recorder := &requestRecorder{start: time.Now()} + if r.ContentLength > 0 { + recorder.payloadBytes = r.ContentLength + } 
 else if r.Body != nil {
+		// Determine payload size without consuming the request body: try to
+		// measure it as a seeker first, falling back to draining a GetBody copy.
+		rsc, ok := r.Body.(aws.ReaderSeekerCloser)
+		if !ok {
+			rsc = aws.ReadSeekCloser(r.Body)
+		}
+		if length, _ := aws.SeekerLen(rsc); length > 0 {
+			recorder.payloadBytes = length
+		} else if body, err := r.GetBody(); err == nil {
+			// GetBody returns a fresh copy, so draining it leaves r.Body intact.
+			// NOTE(review): net/http documents that GetBody may be nil, in which
+			// case this call would panic — confirm all callers set it.
+			recorder.payloadBytes, _ = io.Copy(io.Discard, body)
+		}
+	}
+	csh.requestCache.Set(requestID, recorder, ttlcache.DefaultTTL)
+}
+
+// HandleResponse pairs the response with the recorded request (matched by
+// request ID), computes latency/payload/status stats, and stores them as the
+// latest stats for the operation. Responses without a cached request entry
+// (filtered, expired, or evicted) are ignored.
+func (csh *clientStatsHandler) HandleResponse(ctx context.Context, r *http.Response) {
+	operation := csh.getOperationName(ctx)
+	if !csh.filter.IsAllowed(operation) {
+		return
+	}
+	requestID := csh.getRequestID(ctx)
+	item, ok := csh.requestCache.GetAndDelete(requestID)
+	if !ok {
+		return
+	}
+	recorder := item.Value()
+	stats := agent.Stats{
+		PayloadBytes: aws.Int(int(recorder.payloadBytes)),
+		StatusCode:   aws.Int(r.StatusCode),
+	}
+	latency := time.Since(recorder.start)
+	stats.LatencyMillis = aws.Int64(latency.Milliseconds())
+	csh.statsByOperation.Store(operation, stats)
+}
+
+// Stats returns the most recently recorded stats for the operation, or the
+// zero value when nothing has been recorded.
+func (csh *clientStatsHandler) Stats(operation string) agent.Stats {
+	value, ok := csh.statsByOperation.Load(operation)
+	if !ok {
+		return agent.Stats{}
+	}
+	stats, ok := value.(agent.Stats)
+	if !ok {
+		return agent.Stats{}
+	}
+	return stats
+}
diff --git a/extension/agenthealth/handler/stats/client/client_test.go b/extension/agenthealth/handler/stats/client/client_test.go
new file mode 100644
index 0000000000..35ac023e0e
--- /dev/null
+++ b/extension/agenthealth/handler/stats/client/client_test.go
@@ -0,0 +1,67 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +package client + +import ( + "bytes" + "context" + "net/http" + "testing" + "time" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +func TestHandle(t *testing.T) { + operation := "test" + handler := NewHandler(agent.NewOperationsFilter("test")) + handler.(*clientStatsHandler).getOperationName = func(context.Context) string { + return operation + } + assert.Equal(t, awsmiddleware.After, handler.Position()) + assert.Equal(t, handlerID, handler.ID()) + body := []byte("test payload size") + req, err := http.NewRequest("", "localhost", bytes.NewBuffer(body)) + require.NoError(t, err) + req.ContentLength = 20 + ctx := context.Background() + handler.HandleRequest(ctx, req) + got := handler.Stats(operation) + assert.Nil(t, got.LatencyMillis) + assert.Nil(t, got.PayloadBytes) + assert.Nil(t, got.StatusCode) + time.Sleep(time.Millisecond) + handler.HandleResponse(ctx, &http.Response{StatusCode: http.StatusOK}) + got = handler.Stats(operation) + assert.NotNil(t, got.LatencyMillis) + assert.NotNil(t, got.PayloadBytes) + assert.NotNil(t, got.StatusCode) + assert.Equal(t, http.StatusOK, *got.StatusCode) + assert.Equal(t, 20, *got.PayloadBytes) + assert.GreaterOrEqual(t, *got.LatencyMillis, int64(1)) + + // without content length + req.ContentLength = 0 + handler.HandleRequest(ctx, req) + handler.HandleResponse(ctx, &http.Response{StatusCode: http.StatusOK}) + got = handler.Stats(operation) + assert.NotNil(t, got.PayloadBytes) + assert.Equal(t, 17, *got.PayloadBytes) + + // with seeker + body = append(body, " with seeker"...) 
+ req, err = http.NewRequest("", "localhost", aws.ReadSeekCloser(bytes.NewReader(body))) + require.NoError(t, err) + req.ContentLength = 0 + handler.HandleRequest(ctx, req) + handler.HandleResponse(ctx, &http.Response{StatusCode: http.StatusOK}) + got = handler.Stats(operation) + assert.NotNil(t, got.PayloadBytes) + assert.Equal(t, 29, *got.PayloadBytes) +} diff --git a/extension/agenthealth/handler/stats/handler.go b/extension/agenthealth/handler/stats/handler.go new file mode 100644 index 0000000000..f5fe991125 --- /dev/null +++ b/extension/agenthealth/handler/stats/handler.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package stats + +import ( + "context" + "net/http" + "sync" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/client" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" +) + +const ( + handlerID = "cloudwatchagent.AgentStats" + headerKeyAgentStats = "X-Amz-Agent-Stats" +) + +func NewHandlers(logger *zap.Logger, cfg agent.StatsConfig) ([]awsmiddleware.RequestHandler, []awsmiddleware.ResponseHandler) { + filter := agent.NewOperationsFilter(cfg.Operations...) 
+	clientStats := client.NewHandler(filter)
+	// Aggregate stats from the client stats handler plus the process and flag providers.
+	stats := newStatsHandler(logger, filter, []agent.StatsProvider{clientStats, provider.GetProcessStats(), provider.GetFlagsStats()})
+	return []awsmiddleware.RequestHandler{stats, clientStats}, []awsmiddleware.ResponseHandler{clientStats}
+}
+
+// statsHandler merges the Stats reported by each registered provider and
+// attaches the serialized result to outgoing requests in the
+// X-Amz-Agent-Stats header.
+type statsHandler struct {
+	// NOTE(review): mu is declared but no method in this file locks it —
+	// confirm it is needed, or remove it.
+	mu sync.Mutex
+
+	logger    *zap.Logger
+	filter    agent.OperationsFilter
+	providers []agent.StatsProvider
+}
+
+// newStatsHandler builds a statsHandler over the given providers. Provider
+// order matters: Stats fields are merged in order, so a non-nil field from a
+// later provider overwrites the same field from an earlier one.
+func newStatsHandler(logger *zap.Logger, filter agent.OperationsFilter, providers []agent.StatsProvider) *statsHandler {
+	sh := &statsHandler{
+		logger:    logger,
+		filter:    filter,
+		providers: providers,
+	}
+	return sh
+}
+
+var _ awsmiddleware.RequestHandler = (*statsHandler)(nil)
+
+// ID returns the unique middleware handler identifier.
+func (sh *statsHandler) ID() string {
+	return handlerID
+}
+
+// Position reports where the handler is registered in the middleware chain.
+func (sh *statsHandler) Position() awsmiddleware.HandlerPosition {
+	return awsmiddleware.After
+}
+
+// HandleRequest sets the agent stats header on the request. Operations not
+// allowed by the filter are skipped, and an empty header value is never set.
+func (sh *statsHandler) HandleRequest(ctx context.Context, r *http.Request) {
+	operation := awsmiddleware.GetOperationName(ctx)
+	if !sh.filter.IsAllowed(operation) {
+		return
+	}
+	header := sh.Header(operation)
+	if header != "" {
+		r.Header.Set(headerKeyAgentStats, header)
+	}
+}
+
+// Header merges every provider's stats for the operation and serializes the
+// result. On a marshal failure it logs a warning and returns the empty string.
+func (sh *statsHandler) Header(operation string) string {
+	stats := &agent.Stats{}
+	for _, p := range sh.providers {
+		stats.Merge(p.Stats(operation))
+	}
+	header, err := stats.Marshal()
+	if err != nil {
+		sh.logger.Warn("Failed to serialize agent stats", zap.Error(err))
+	}
+	return header
+}
diff --git a/extension/agenthealth/handler/stats/handler_test.go b/extension/agenthealth/handler/stats/handler_test.go
new file mode 100644
index 0000000000..f40bebd481
--- /dev/null
+++ b/extension/agenthealth/handler/stats/handler_test.go
@@ -0,0 +1,73 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +package stats + +import ( + "context" + "net/http" + "testing" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +type mockStatsProvider struct { + stats *agent.Stats +} + +var _ agent.StatsProvider = (*mockStatsProvider)(nil) + +func (m *mockStatsProvider) Stats(string) agent.Stats { + return *m.stats +} + +func newMockStatsProvider(stats *agent.Stats) agent.StatsProvider { + return &mockStatsProvider{stats: stats} +} + +func TestStatsHandler(t *testing.T) { + stats := &agent.Stats{ + FileDescriptorCount: aws.Int32(456), + ThreadCount: aws.Int32(789), + LatencyMillis: aws.Int64(1234), + PayloadBytes: aws.Int(5678), + StatusCode: aws.Int(200), + ImdsFallbackSucceed: aws.Int(1), + SharedConfigFallback: aws.Int(1), + } + handler := newStatsHandler( + zap.NewNop(), + agent.NewOperationsFilter(), + []agent.StatsProvider{ + newMockStatsProvider(&agent.Stats{CpuPercent: aws.Float64(1.2)}), + newMockStatsProvider(&agent.Stats{MemoryBytes: aws.Uint64(123)}), + newMockStatsProvider(stats), + }, + ) + ctx := context.Background() + assert.Equal(t, awsmiddleware.After, handler.Position()) + assert.Equal(t, handlerID, handler.ID()) + req, err := http.NewRequest("", "localhost", nil) + require.NoError(t, err) + handler.HandleRequest(ctx, req) + assert.Equal(t, "", req.Header.Get(headerKeyAgentStats)) + handler.filter = agent.NewOperationsFilter(agent.AllowAllOperations) + handler.HandleRequest(ctx, req) + assert.Equal(t, `"cpu":1.2,"mem":123,"fd":456,"th":789,"lat":1234,"load":5678,"code":200,"scfb":1,"ifs":1`, req.Header.Get(headerKeyAgentStats)) + stats.StatusCode = aws.Int(404) + stats.LatencyMillis = nil + handler.HandleRequest(ctx, req) + assert.Equal(t, 
`"cpu":1.2,"mem":123,"fd":456,"th":789,"load":5678,"code":404,"scfb":1,"ifs":1`, req.Header.Get(headerKeyAgentStats)) +} + +func TestNewHandlers(t *testing.T) { + requestHandlers, responseHandlers := NewHandlers(zap.NewNop(), agent.StatsConfig{}) + assert.Len(t, requestHandlers, 2) + assert.Len(t, responseHandlers, 1) +} diff --git a/extension/agenthealth/handler/stats/provider/flag.go b/extension/agenthealth/handler/stats/provider/flag.go new file mode 100644 index 0000000000..de684e1f86 --- /dev/null +++ b/extension/agenthealth/handler/stats/provider/flag.go @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package provider + +import ( + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +const ( + flagGetInterval = 5 * time.Minute +) + +type Flag int + +const ( + FlagIMDSFallbackSucceed Flag = iota + FlagSharedConfigFallback + FlagAppSignal + FlagEnhancedContainerInsights + FlagRunningInContainer + FlagMode + FlagRegionType +) + +var ( + flagSingleton FlagStats + flagOnce sync.Once +) + +type FlagStats interface { + agent.StatsProvider + SetFlag(flag Flag) + SetFlagWithValue(flag Flag, value string) +} + +type flagStats struct { + *intervalStats + + flags sync.Map +} + +var _ FlagStats = (*flagStats)(nil) + +func (p *flagStats) update() { + p.stats.Store(agent.Stats{ + ImdsFallbackSucceed: p.getIntFlag(FlagIMDSFallbackSucceed, false), + SharedConfigFallback: p.getIntFlag(FlagSharedConfigFallback, false), + AppSignals: p.getIntFlag(FlagAppSignal, false), + EnhancedContainerInsights: p.getIntFlag(FlagEnhancedContainerInsights, false), + RunningInContainer: p.getIntFlag(FlagRunningInContainer, true), + Mode: p.getStringFlag(FlagMode), + RegionType: p.getStringFlag(FlagRegionType), + }) +} + +func (p *flagStats) getIntFlag(flag Flag, 
missingAsZero bool) *int {
+	// A set flag reports 1. An unset flag reports 0 only when missingAsZero
+	// is true; otherwise nil so the field is omitted from the serialized stats.
+	if _, ok := p.flags.Load(flag); ok {
+		return aws.Int(1)
+	}
+	if missingAsZero {
+		return aws.Int(0)
+	}
+	return nil
+}
+
+// getStringFlag returns the flag's string value, or nil if the flag is unset
+// or holds a non-string value.
+func (p *flagStats) getStringFlag(flag Flag) *string {
+	value, ok := p.flags.Load(flag)
+	if !ok {
+		return nil
+	}
+	var str string
+	str, ok = value.(string)
+	if !ok {
+		return nil
+	}
+	return aws.String(str)
+}
+
+// SetFlag marks the flag as set (first call wins) and refreshes the cached stats.
+// NOTE(review): the Load-then-Store pair is not atomic; concurrent callers may
+// both store. Harmless here since the stored value is constant, but
+// sync.Map.LoadOrStore would make it atomic.
+func (p *flagStats) SetFlag(flag Flag) {
+	if _, ok := p.flags.Load(flag); !ok {
+		p.flags.Store(flag, true)
+		p.update()
+	}
+}
+
+// SetFlagWithValue stores a string value for the flag. Only the first value is
+// kept; later calls with a different value are ignored.
+func (p *flagStats) SetFlagWithValue(flag Flag, value string) {
+	if _, ok := p.flags.Load(flag); !ok {
+		p.flags.Store(flag, value)
+		p.update()
+	}
+}
+
+// newFlagStats creates the provider and seeds the running-in-container flag
+// from the environment.
+func newFlagStats(interval time.Duration) *flagStats {
+	stats := &flagStats{
+		intervalStats: newIntervalStats(interval),
+	}
+	if envconfig.IsRunningInContainer() {
+		stats.SetFlag(FlagRunningInContainer)
+	}
+	return stats
+}
+
+// GetFlagsStats returns the process-wide singleton flag stats provider.
+func GetFlagsStats() FlagStats {
+	flagOnce.Do(func() {
+		flagSingleton = newFlagStats(flagGetInterval)
+	})
+	return flagSingleton
+}
diff --git a/extension/agenthealth/handler/stats/provider/flag_test.go b/extension/agenthealth/handler/stats/provider/flag_test.go
new file mode 100644
index 0000000000..cbc42c094a
--- /dev/null
+++ b/extension/agenthealth/handler/stats/provider/flag_test.go
@@ -0,0 +1,37 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +package provider + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" +) + +func TestFlagStats(t *testing.T) { + t.Setenv(envconfig.RunInContainer, envconfig.TrueValue) + provider := newFlagStats(time.Microsecond) + got := provider.getStats() + assert.Nil(t, got.ImdsFallbackSucceed) + assert.Nil(t, got.SharedConfigFallback) + assert.NotNil(t, got.RunningInContainer) + assert.Equal(t, 1, *got.RunningInContainer) + provider.SetFlag(FlagIMDSFallbackSucceed) + assert.Nil(t, got.ImdsFallbackSucceed) + got = provider.getStats() + assert.NotNil(t, got.ImdsFallbackSucceed) + assert.Equal(t, 1, *got.ImdsFallbackSucceed) + assert.Nil(t, got.SharedConfigFallback) + provider.SetFlag(FlagSharedConfigFallback) + got = provider.getStats() + assert.NotNil(t, got.SharedConfigFallback) + assert.Equal(t, 1, *got.SharedConfigFallback) + provider.SetFlagWithValue(FlagMode, "test") + got = provider.getStats() + assert.NotNil(t, got.Mode) + assert.Equal(t, "test", *got.Mode) +} diff --git a/extension/agenthealth/handler/stats/provider/interval.go b/extension/agenthealth/handler/stats/provider/interval.go new file mode 100644 index 0000000000..bc5dc8e24e --- /dev/null +++ b/extension/agenthealth/handler/stats/provider/interval.go @@ -0,0 +1,60 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package provider + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +// intervalStats restricts the Stats get function to once +// per interval. 
+type intervalStats struct {
+	mu       sync.RWMutex
+	interval time.Duration
+
+	// getOnce gates the next Stats read; it is replaced with a fresh
+	// sync.Once after interval elapses (see allowNextGetAfter).
+	getOnce *sync.Once
+	// NOTE(review): lastGet is written but never read in this file —
+	// confirm it is needed, or remove it.
+	lastGet time.Time
+
+	// stats holds the most recently stored agent.Stats value.
+	stats atomic.Value
+}
+
+var _ agent.StatsProvider = (*intervalStats)(nil)
+
+// Stats returns the cached stats at most once per interval; throttled calls
+// return the zero-value agent.Stats. The RLock prevents allowNextGetAfter
+// from swapping getOnce out from under the Do call.
+func (p *intervalStats) Stats(string) agent.Stats {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	var stats agent.Stats
+	p.getOnce.Do(func() {
+		p.lastGet = time.Now()
+		stats = p.getStats()
+		// Re-arm the gate once the interval has elapsed.
+		go p.allowNextGetAfter(p.interval)
+	})
+	return stats
+}
+
+// getStats loads the last stored stats, or the zero value if none stored yet.
+func (p *intervalStats) getStats() agent.Stats {
+	var stats agent.Stats
+	if value := p.stats.Load(); value != nil {
+		stats = value.(agent.Stats)
+	}
+	return stats
+}
+
+// allowNextGetAfter sleeps for the interval, then installs a fresh sync.Once
+// (under the write lock) so the next Stats call performs a real read.
+func (p *intervalStats) allowNextGetAfter(interval time.Duration) {
+	time.Sleep(interval)
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.getOnce = new(sync.Once)
+}
+
+// newIntervalStats creates an intervalStats that allows an immediate first read.
+func newIntervalStats(interval time.Duration) *intervalStats {
+	return &intervalStats{
+		getOnce: new(sync.Once),
+		interval: interval,
+	}
+}
diff --git a/extension/agenthealth/handler/stats/provider/interval_test.go b/extension/agenthealth/handler/stats/provider/interval_test.go
new file mode 100644
index 0000000000..cd2be8899b
--- /dev/null
+++ b/extension/agenthealth/handler/stats/provider/interval_test.go
@@ -0,0 +1,30 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +package provider + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +func TestIntervalStats(t *testing.T) { + s := newIntervalStats(time.Millisecond) + s.stats.Store(agent.Stats{ + ThreadCount: aws.Int32(2), + }) + got := s.Stats("") + assert.NotNil(t, got.ThreadCount) + got = s.Stats("") + assert.Nil(t, got.ThreadCount) + time.Sleep(2 * time.Millisecond) + got = s.Stats("") + assert.NotNil(t, got.ThreadCount) + got = s.Stats("") + assert.Nil(t, got.ThreadCount) +} diff --git a/extension/agenthealth/handler/stats/provider/process.go b/extension/agenthealth/handler/stats/provider/process.go new file mode 100644 index 0000000000..4e88ebbee5 --- /dev/null +++ b/extension/agenthealth/handler/stats/provider/process.go @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package provider + +import ( + "os" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/shirou/gopsutil/v3/process" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +const ( + processGetInterval = time.Minute +) + +var ( + processSingleton *processStats + processOnce sync.Once +) + +type processMetrics interface { + CPUPercent() (float64, error) + MemoryInfo() (*process.MemoryInfoStat, error) + NumFDs() (int32, error) + NumThreads() (int32, error) +} + +type processStats struct { + *intervalStats + + proc processMetrics +} + +var _ agent.StatsProvider = (*processStats)(nil) + +func (p *processStats) cpuPercent() *float64 { + if cpuPercent, err := p.proc.CPUPercent(); err == nil { + return aws.Float64(float64(int64(cpuPercent*10)) / 10) // truncate to 10th decimal place + } + return nil +} + +func (p *processStats) memoryBytes() *uint64 { + if memInfo, err := p.proc.MemoryInfo(); err == nil { + 
return aws.Uint64(memInfo.RSS) + } + return nil +} + +func (p *processStats) fileDescriptorCount() *int32 { + if fdCount, err := p.proc.NumFDs(); err == nil { + return aws.Int32(fdCount) + } + return nil +} + +func (p *processStats) threadCount() *int32 { + if thCount, err := p.proc.NumThreads(); err == nil { + return aws.Int32(thCount) + } + return nil +} + +func (p *processStats) updateLoop() { + ticker := time.NewTicker(p.interval) + for range ticker.C { + p.refresh() + } +} + +func (p *processStats) refresh() { + p.stats.Store(agent.Stats{ + CpuPercent: p.cpuPercent(), + MemoryBytes: p.memoryBytes(), + FileDescriptorCount: p.fileDescriptorCount(), + ThreadCount: p.threadCount(), + }) +} + +func newProcessStats(proc processMetrics, interval time.Duration) *processStats { + ps := &processStats{ + intervalStats: newIntervalStats(interval), + proc: proc, + } + ps.refresh() + go ps.updateLoop() + return ps +} + +func GetProcessStats() agent.StatsProvider { + processOnce.Do(func() { + proc, _ := process.NewProcess(int32(os.Getpid())) + processSingleton = newProcessStats(proc, processGetInterval) + }) + return processSingleton +} diff --git a/extension/agenthealth/handler/stats/provider/process_test.go b/extension/agenthealth/handler/stats/provider/process_test.go new file mode 100644 index 0000000000..3448ad2fcd --- /dev/null +++ b/extension/agenthealth/handler/stats/provider/process_test.go @@ -0,0 +1,69 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package provider + +import ( + "errors" + "testing" + "time" + + "github.com/shirou/gopsutil/v3/process" + "github.com/stretchr/testify/assert" +) + +type mockProcessMetrics struct { + err error +} + +var _ processMetrics = (*mockProcessMetrics)(nil) + +func (m mockProcessMetrics) CPUPercent() (float64, error) { + if m.err != nil { + return -1, m.err + } + return 1, nil +} + +func (m mockProcessMetrics) MemoryInfo() (*process.MemoryInfoStat, error) { + if m.err != nil { + return nil, m.err + } + return &process.MemoryInfoStat{RSS: uint64(2)}, nil +} + +func (m mockProcessMetrics) NumFDs() (int32, error) { + if m.err != nil { + return -1, m.err + } + return 3, nil +} + +func (m mockProcessMetrics) NumThreads() (int32, error) { + if m.err != nil { + return -1, m.err + } + return 4, nil +} + +func TestProcessStats(t *testing.T) { + testErr := errors.New("test error") + mock := &mockProcessMetrics{} + provider := newProcessStats(mock, time.Millisecond) + got := provider.getStats() + assert.NotNil(t, got.CpuPercent) + assert.NotNil(t, got.MemoryBytes) + assert.NotNil(t, got.FileDescriptorCount) + assert.NotNil(t, got.ThreadCount) + assert.EqualValues(t, 1, *got.CpuPercent) + assert.EqualValues(t, 2, *got.MemoryBytes) + assert.EqualValues(t, 3, *got.FileDescriptorCount) + assert.EqualValues(t, 4, *got.ThreadCount) + mock.err = testErr + time.Sleep(2 * time.Millisecond) + got = provider.getStats() + assert.Nil(t, got.CpuPercent) + assert.Nil(t, got.MemoryBytes) + assert.Nil(t, got.FileDescriptorCount) + assert.Nil(t, got.ThreadCount) +} diff --git a/extension/agenthealth/handler/useragent/handler.go b/extension/agenthealth/handler/useragent/handler.go new file mode 100644 index 0000000000..c5933fa026 --- /dev/null +++ b/extension/agenthealth/handler/useragent/handler.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package useragent + +import ( + "context" + "net/http" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "go.uber.org/atomic" +) + +const ( + handlerID = "cloudwatchagent.UserAgent" + headerKeyUserAgent = "User-Agent" +) + +type userAgentHandler struct { + userAgent UserAgent + isUsageDataEnabled bool + header *atomic.String +} + +var _ awsmiddleware.RequestHandler = (*userAgentHandler)(nil) + +func (uah *userAgentHandler) ID() string { + return handlerID +} + +func (uah *userAgentHandler) Position() awsmiddleware.HandlerPosition { + return awsmiddleware.After +} + +// HandleRequest prepends the User-Agent header with the CloudWatch Agent's +// user agent string. +func (uah *userAgentHandler) HandleRequest(_ context.Context, r *http.Request) { + newHeader := uah.Header() + current := r.Header.Get(headerKeyUserAgent) + if current != "" { + newHeader += separator + current + } + r.Header.Set(headerKeyUserAgent, newHeader) +} + +func (uah *userAgentHandler) Header() string { + return uah.header.Load() +} + +func (uah *userAgentHandler) refreshHeader() { + uah.header.Store(uah.userAgent.Header(uah.isUsageDataEnabled)) +} + +func newHandler(userAgent UserAgent, isUsageDataEnabled bool) *userAgentHandler { + handler := &userAgentHandler{ + userAgent: userAgent, + header: &atomic.String{}, + isUsageDataEnabled: isUsageDataEnabled, + } + handler.refreshHeader() + userAgent.Listen(handler.refreshHeader) + return handler +} + +func NewHandler(isUsageDataEnabled bool) awsmiddleware.RequestHandler { + return newHandler(Get(), isUsageDataEnabled) +} diff --git a/extension/agenthealth/handler/useragent/handler_test.go b/extension/agenthealth/handler/useragent/handler_test.go new file mode 100644 index 0000000000..89d902b29c --- /dev/null +++ b/extension/agenthealth/handler/useragent/handler_test.go @@ -0,0 +1,32 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package useragent + +import ( + "context" + "net/http" + "testing" + + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" +) + +func TestUserAgentHandler(t *testing.T) { + t.Setenv(envconfig.CWAGENT_USER_AGENT, "FirstUA") + ua := newUserAgent() + handler := newHandler(ua, true) + assert.Equal(t, handlerID, handler.ID()) + assert.Equal(t, awsmiddleware.After, handler.Position()) + req, err := http.NewRequest("", "localhost", nil) + require.NoError(t, err) + handler.HandleRequest(context.Background(), req) + assert.Equal(t, "FirstUA", req.Header.Get(headerKeyUserAgent)) + t.Setenv(envconfig.CWAGENT_USER_AGENT, "SecondUA") + ua.notify() + handler.HandleRequest(context.Background(), req) + assert.Equal(t, "SecondUA FirstUA", req.Header.Get(headerKeyUserAgent)) +} diff --git a/extension/agenthealth/handler/useragent/useragent.go b/extension/agenthealth/handler/useragent/useragent.go new file mode 100644 index 0000000000..4af3be0f11 --- /dev/null +++ b/extension/agenthealth/handler/useragent/useragent.go @@ -0,0 +1,191 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package useragent + +import ( + "fmt" + "os" + "sort" + "strings" + "sync" + + "github.com/google/uuid" + telegraf "github.com/influxdata/telegraf/config" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" + "go.opentelemetry.io/collector/otelcol" + "go.uber.org/atomic" + "golang.org/x/exp/maps" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/internal/version" + "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" +) + +const ( + flagRunAsUser = "run_as_user" + flagContainerInsights = "container_insights" + flagAppSignals = "app_signals" + flagEnhancedContainerInsights = "enhanced_container_insights" + + separator = " " + + typeInputs = "inputs" + typeProcessors = "processors" + typeOutputs = "outputs" +) + +var ( + singleton UserAgent + once sync.Once +) + +type UserAgent interface { + SetComponents(otelCfg *otelcol.Config, telegrafCfg *telegraf.Config) + SetContainerInsightsFlag() + Header(isUsageDataEnabled bool) string + Listen(listener func()) +} + +type userAgent struct { + dataLock sync.Mutex + id string + + listenerLock sync.Mutex + listeners []func() + isRoot bool + + inputs collections.Set[string] + processors collections.Set[string] + outputs collections.Set[string] + + inputsStr atomic.String + processorsStr atomic.String + outputsStr atomic.String +} + +var _ UserAgent = (*userAgent)(nil) + +func (ua *userAgent) SetComponents(otelCfg *otelcol.Config, telegrafCfg *telegraf.Config) { + for _, input := range telegrafCfg.Inputs { + ua.inputs.Add(input.Config.Name) + } + for _, output := range telegrafCfg.Outputs { + ua.outputs.Add(output.Config.Name) + } + + for _, pipeline := range otelCfg.Service.Pipelines { + for _, receiver := range pipeline.Receivers { + // trim the 
adapter prefix from adapted Telegraf plugins + name := strings.TrimPrefix(string(receiver.Type()), adapter.TelegrafPrefix) + ua.inputs.Add(name) + } + for _, processor := range pipeline.Processors { + ua.processors.Add(string(processor.Type())) + } + for _, exporter := range pipeline.Exporters { + ua.outputs.Add(string(exporter.Type())) + if exporter.Type() == "awsemf" { + cfg := otelCfg.Exporters[exporter].(*awsemfexporter.Config) + if cfg.IsAppSignalsEnabled() { + ua.outputs.Add(flagAppSignals) + provider.GetFlagsStats().SetFlag(provider.FlagAppSignal) + } + if cfg.IsEnhancedContainerInsights() { + ua.outputs.Add(flagEnhancedContainerInsights) + provider.GetFlagsStats().SetFlag(provider.FlagEnhancedContainerInsights) + } + } + } + } + + if !ua.isRoot { + ua.inputs.Add(flagRunAsUser) + } + + ua.inputsStr.Store(componentsStr(typeInputs, ua.inputs)) + ua.processorsStr.Store(componentsStr(typeProcessors, ua.processors)) + ua.outputsStr.Store(componentsStr(typeOutputs, ua.outputs)) + ua.notify() +} + +func (ua *userAgent) SetContainerInsightsFlag() { + ua.dataLock.Lock() + defer ua.dataLock.Unlock() + if !ua.outputs.Contains(flagContainerInsights) { + ua.outputs.Add(flagContainerInsights) + ua.outputsStr.Store(componentsStr(typeOutputs, ua.outputs)) + ua.notify() + } +} + +func (ua *userAgent) Listen(listener func()) { + ua.listenerLock.Lock() + defer ua.listenerLock.Unlock() + ua.listeners = append(ua.listeners, listener) +} + +func (ua *userAgent) notify() { + ua.listenerLock.Lock() + defer ua.listenerLock.Unlock() + for _, listener := range ua.listeners { + listener() + } +} + +func (ua *userAgent) Header(isUsageDataEnabled bool) string { + if envUserAgent := os.Getenv(envconfig.CWAGENT_USER_AGENT); envUserAgent != "" { + return envUserAgent + } + if !isUsageDataEnabled { + return version.Full() + } + + var components []string + inputs := ua.inputsStr.Load() + if inputs != "" { + components = append(components, inputs) + } + processors := ua.processorsStr.Load() + 
if processors != "" { + components = append(components, processors) + } + outputs := ua.outputsStr.Load() + if outputs != "" { + components = append(components, outputs) + } + + return strings.TrimSpace(fmt.Sprintf("%s ID/%s %s", version.Full(), ua.id, strings.Join(components, separator))) +} + +func componentsStr(componentType string, componentSet collections.Set[string]) string { + if len(componentSet) == 0 { + return "" + } + components := maps.Keys(componentSet) + sort.Strings(components) + return fmt.Sprintf("%s:(%s)", componentType, strings.Join(components, separator)) +} + +func isRunningAsRoot() bool { + return os.Getuid() == 0 +} + +func newUserAgent() *userAgent { + return &userAgent{ + id: uuid.NewString(), + isRoot: isRunningAsRoot(), + inputs: collections.NewSet[string](), + processors: collections.NewSet[string](), + outputs: collections.NewSet[string](), + } +} + +func Get() UserAgent { + once.Do(func() { + singleton = newUserAgent() + }) + return singleton +} diff --git a/extension/agenthealth/handler/useragent/useragent_test.go b/extension/agenthealth/handler/useragent/useragent_test.go new file mode 100644 index 0000000000..2ce645e03f --- /dev/null +++ b/extension/agenthealth/handler/useragent/useragent_test.go @@ -0,0 +1,145 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package useragent + +import ( + "sync" + "testing" + + telegraf "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/service" + "go.opentelemetry.io/collector/service/pipelines" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/internal/version" + "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" +) + +func TestSetComponents(t *testing.T) { + otelCfg := &otelcol.Config{ + Service: service.Config{ + Pipelines: map[component.ID]*pipelines.PipelineConfig{ + component.NewID("metrics"): { + Receivers: []component.ID{ + component.NewID(adapter.TelegrafPrefix + "cpu"), + component.NewID("prometheus"), + }, + Processors: []component.ID{ + component.NewID("batch"), + component.NewID("filter"), + }, + Exporters: []component.ID{ + component.NewID("cloudwatch"), + }, + }, + }, + }, + } + telegrafCfg := &telegraf.Config{ + Inputs: []*models.RunningInput{ + {Config: &models.InputConfig{Name: "logs"}}, + {Config: &models.InputConfig{Name: "cpu"}}, + }, + Outputs: []*models.RunningOutput{ + {Config: &models.OutputConfig{Name: "cloudwatchlogs"}}, + }, + } + + ua := newUserAgent() + ua.isRoot = true + ua.SetComponents(otelCfg, telegrafCfg) + assert.Len(t, ua.inputs, 3) + assert.Len(t, ua.processors, 2) + assert.Len(t, ua.outputs, 2) + + assert.Equal(t, "inputs:(cpu logs prometheus)", ua.inputsStr.Load()) + assert.Equal(t, "processors:(batch filter)", ua.processorsStr.Load()) + assert.Equal(t, "outputs:(cloudwatch cloudwatchlogs)", ua.outputsStr.Load()) + assert.Contains(t, ua.Header(true), "inputs:(cpu logs prometheus) processors:(batch filter) outputs:(cloudwatch cloudwatchlogs)") + + ua.isRoot = false + 
ua.SetComponents(otelCfg, telegrafCfg) + assert.Len(t, ua.inputs, 4) + assert.Equal(t, "inputs:(cpu logs prometheus run_as_user)", ua.inputsStr.Load()) +} + +func TestSetComponentsEmpty(t *testing.T) { + ua := newUserAgent() + ua.SetComponents(&otelcol.Config{}, &telegraf.Config{}) + assert.Len(t, ua.inputs, 1) + assert.Len(t, ua.processors, 0) + assert.Len(t, ua.outputs, 0) + + assert.Equal(t, "inputs:(run_as_user)", ua.inputsStr.Load()) + assert.Equal(t, "", ua.processorsStr.Load()) + assert.Equal(t, "", ua.outputsStr.Load()) +} + +func TestContainerInsightsFlag(t *testing.T) { + ua := newUserAgent() + ua.outputs.Add("TEST_EXPORTER") + ua.SetContainerInsightsFlag() + assert.Equal(t, "outputs:(TEST_EXPORTER container_insights)", ua.outputsStr.Load()) + // do not rebuild output string if flag already set + ua.outputs.Add("flag_already_set") + ua.SetContainerInsightsFlag() + assert.Equal(t, "outputs:(TEST_EXPORTER container_insights)", ua.outputsStr.Load()) +} + +func TestAlternateUserAgent(t *testing.T) { + t.Setenv(envconfig.CWAGENT_USER_AGENT, "TEST_AGENT") + ua := newUserAgent() + assert.Equal(t, "TEST_AGENT", ua.Header(false)) + t.Setenv(envconfig.CWAGENT_USER_AGENT, "") + assert.Equal(t, version.Full(), ua.Header(false)) +} + +func TestEmf(t *testing.T) { + otelCfg := &otelcol.Config{ + Service: service.Config{ + Pipelines: map[component.ID]*pipelines.PipelineConfig{ + component.NewID("metrics"): { + Receivers: []component.ID{ + component.NewID("nop"), + }, + Exporters: []component.ID{ + component.NewID("awsemf"), + }, + }, + }, + }, + Exporters: map[component.ID]component.Config{ + component.NewID("awsemf"): &awsemfexporter.Config{Namespace: "AppSignals", LogGroupName: "/aws/appsignals/log/group"}, + }, + } + ua := newUserAgent() + ua.SetComponents(otelCfg, &telegraf.Config{}) + assert.Len(t, ua.inputs, 2) + assert.Len(t, ua.processors, 0) + assert.Len(t, ua.outputs, 2) + + assert.Equal(t, "inputs:(nop run_as_user)", ua.inputsStr.Load()) + assert.Equal(t, "", 
ua.processorsStr.Load()) + assert.Equal(t, "outputs:(app_signals awsemf)", ua.outputsStr.Load()) +} + +func TestSingleton(t *testing.T) { + assert.Equal(t, Get().(*userAgent).id, Get().(*userAgent).id) +} + +func TestListen(t *testing.T) { + var wg sync.WaitGroup + ua := newUserAgent() + for i := 0; i < 4; i++ { + wg.Add(1) + ua.Listen(wg.Done) + } + ua.SetContainerInsightsFlag() + wg.Wait() +} diff --git a/extension/agenthealth/testdata/config.yaml b/extension/agenthealth/testdata/config.yaml new file mode 100644 index 0000000000..57fdbd02fe --- /dev/null +++ b/extension/agenthealth/testdata/config.yaml @@ -0,0 +1,8 @@ +agenthealth: +agenthealth/1: + is_usage_data_enabled: false +agenthealth/2: + is_usage_data_enabled: true + stats: + operations: + - 'ListBuckets' diff --git a/go.mod b/go.mod index 8dca448a5d..474e871a4f 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2022 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50 replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50 @@ -86,6 +85,7 @@ replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api require ( github.com/BurntSushi/toml v1.3.2 github.com/Jeffail/gabs v1.4.0 + github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023230448-f645697bf350 github.com/aws/aws-sdk-go v1.45.24 github.com/aws/aws-sdk-go-v2 
v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.25 @@ -106,6 +106,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 github.com/influxdata/telegraf v0.0.0-00010101000000-000000000000 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/jellydator/ttlcache/v3 v3.1.0 github.com/kardianos/service v1.2.1 // Keep this pinned to v1.2.1. v1.2.2 causes the agent to not register as a service on Windows github.com/kr/pretty v0.3.1 github.com/oklog/run v1.1.0 @@ -131,14 +132,18 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 go.opentelemetry.io/collector v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/component v0.84.1-0.20230908201109-ab3d6c5b6470 + go.opentelemetry.io/collector/config/configtelemetry v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/confmap v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/consumer v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/exporter v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/exporter/loggingexporter v0.84.0 + go.opentelemetry.io/collector/extension v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/pdata v1.0.0-rcv0014.0.20230908201109-ab3d6c5b6470 + go.opentelemetry.io/collector/processor v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/processor/batchprocessor v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/receiver v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/receiver/otlpreceiver v0.84.0 + go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.25.0 golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 @@ -157,12 +162,6 @@ require ( k8s.io/klog/v2 v2.100.1 ) -require ( - go.opentelemetry.io/collector/config/configtelemetry v0.84.1-0.20230908201109-ab3d6c5b6470 - go.opentelemetry.io/collector/extension v0.84.1-0.20230908201109-ab3d6c5b6470 - go.opentelemetry.io/collector/processor 
v0.84.1-0.20230908201109-ab3d6c5b6470 -) - require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect @@ -182,7 +181,6 @@ require ( github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/participle/v2 v2.0.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023152757-c6e2437e6590 // indirect github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20230928170322-0df38c533713 // indirect github.com/antchfx/jsonquery v1.1.5 // indirect github.com/antchfx/xmlquery v1.3.9 // indirect @@ -395,7 +393,6 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.40.0 // indirect go.opentelemetry.io/otel/trace v1.17.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.2.1 // indirect golang.org/x/crypto v0.13.0 // indirect golang.org/x/mod v0.12.0 // indirect diff --git a/go.sum b/go.sum index 912ce07b84..95c130e960 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfex github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:UAXcRSojI8I0Kb9iS9a2v7J/iPrQ1loJIsBprSaVdFo= github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50 h1:TTRentWNAvzfYCKHclu08pPWgTiBTuxLHXmBjeN1w/M= github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:cr4dmBlfnMVYT+gyKUAKh39zQu5u/UAukxQj15MdZ18= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023152757-c6e2437e6590 h1:uUCPnX2C5C36iyX46N1eUjmp4LRpSTGeexgUWmohv7c= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023152757-c6e2437e6590/go.mod h1:uOQa5/9Jle9VADEdWCXL4AbJr35NJQil30tapcTHQlw= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023230448-f645697bf350 h1:+75XAqf0Og8cshAdekRcqWf3v38Uw34XJRFbul6jbv0= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231023230448-f645697bf350/go.mod h1:uOQa5/9Jle9VADEdWCXL4AbJr35NJQil30tapcTHQlw= github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231102130031-505e23230a50 h1:qpH231iNaHowAP+sLku3pF9Rnw6lNC+8lKDQl6UpS0k= github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231102130031-505e23230a50/go.mod h1:9iAsO2SC8NIsa8/xCmC2Pj4MZPmYdvm+1/n89M74JS4= github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231102130031-505e23230a50 h1:POrjHtvpsWELsdWNq3HBZfvN2Z9pvFMVV0L4T5TJiVM= @@ -768,6 +768,8 @@ github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= diff --git a/handlers/agentinfo/info.go b/handlers/agentinfo/info.go index 
22c3868d2d..5e95aae31b 100644 --- a/handlers/agentinfo/info.go +++ b/handlers/agentinfo/info.go @@ -30,12 +30,22 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" + translatorConfig "github.com/aws/amazon-cloudwatch-agent/translator/config" ) const ( versionFilename = "CWAGENT_VERSION" unknownVersion = "Unknown" updateInterval = time.Minute + // region types + AgentConfigJson = "ACJ" + CredsMap = "CM" + EC2Metadata = "EC2M" + ECSMetadata = "ECSM" + RegionNotFound = "RNF" + ModeEC2 = "EC2" + ModeOnPrem = "OP" + ModeWithIRSA = "WI" ) var ( @@ -50,10 +60,12 @@ var ( id = uuid.NewString() sharedConfigFallback atomic.Bool imdsFallbackSucceed atomic.Bool + isRunningAsRoot = defaultIsRunningAsRoot + runInContainer *int + regionType *string + mode *string ) -var isRunningAsRoot = defaultIsRunningAsRoot - type AgentInfo interface { RecordOpData(time.Duration, int, error) StatsHeader() string @@ -77,15 +89,21 @@ type agentStats struct { StatusCode *int `json:"code,omitempty"` SharedConfigFallback *int `json:"scfb,omitempty"` ImdsFallbackSucceed *int `json:"ifs,omitempty"` + RunInContainer *int `json:"ric,omitempty"` + RegionType *string `json:"rt,omitempty"` + Mode *string `json:"m,omitempty"` } -func New(groupName string) AgentInfo { - return newAgentInfo(groupName) +func New(groupName string, regionType string, mode string) AgentInfo { + return newAgentInfo(groupName, regionType, mode) } -func newAgentInfo(groupName string) *agentInfo { +func newAgentInfo(groupName string, regionTypeInput string, modeInput string) *agentInfo { ai := new(agentInfo) ai.userAgent = getUserAgent(groupName, fullVersion, receivers, processors, exporters, isUsageDataEnabled()) + runInContainer = runInContainerFunc() + regionType = aws.String(regionTypeInput) + mode = aws.String(modeInput) if isUsageDataEnabled() { ai.proc, _ = 
process.NewProcess(int32(os.Getpid())) if ai.proc == nil { @@ -96,6 +114,9 @@ func newAgentInfo(groupName string) *agentInfo { MemoryBytes: ai.memoryBytes(), FileDescriptorCount: ai.fileDescriptorCount(), ThreadCount: ai.threadCount(), + RunInContainer: runInContainer, + RegionType: regionType, + Mode: mode, } ai.statsHeader = getAgentStats(stats) ai.nextUpdate = time.Now().Add(updateInterval) @@ -126,6 +147,9 @@ func (ai *agentInfo) RecordOpData(latency time.Duration, payloadBytes int, err e stats.ThreadCount = ai.threadCount() stats.SharedConfigFallback = getSharedConfigFallback() stats.ImdsFallbackSucceed = succeedImdsFallback() + stats.RunInContainer = runInContainer + stats.RegionType = regionType + stats.Mode = mode ai.nextUpdate = now.Add(updateInterval) } @@ -330,3 +354,10 @@ func getSharedConfigFallback() *int { func SetImdsFallbackSucceed() { imdsFallbackSucceed.Store(true) } + +func runInContainerFunc() *int { + if os.Getenv(translatorConfig.RUN_IN_CONTAINER) == translatorConfig.RUN_IN_CONTAINER_TRUE { + return aws.Int(1) + } + return aws.Int(0) +} diff --git a/handlers/agentinfo/info_test.go b/handlers/agentinfo/info_test.go index 5a7550568d..edc0b05dd8 100644 --- a/handlers/agentinfo/info_test.go +++ b/handlers/agentinfo/info_test.go @@ -29,7 +29,7 @@ import ( ) func TestNew(t *testing.T) { - ai := New("") + ai := New("", "", "") expectedUserAgentRegex := `^CWAgent/Unknown \(.*\) ` + `ID/[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{12}$` @@ -37,7 +37,7 @@ func TestNew(t *testing.T) { } func TestRecordOpData(t *testing.T) { - ai := newAgentInfo("") + ai := newAgentInfo("", "", "") stats := ai.StatsHeader() actual := agentStats{} @@ -124,8 +124,20 @@ func TestGetAgentStats(t *testing.T) { PayloadBytes: aws.Int(5678), StatusCode: aws.Int(200), ImdsFallbackSucceed: aws.Int(1), + RunInContainer: aws.Int(0), + RegionType: aws.String(EC2Metadata), + Mode: aws.String(ModeWithIRSA), } + assert.Equal(t, 
"\"cpu\":1.2,\"mem\":123,\"fd\":456,\"th\":789,\"lat\":1234,\"load\":5678,\"code\":200,\"ifs\":1,\"ric\":0,\"rt\":\"EC2M\",\"m\":\"WI\"", getAgentStats(stats)) + + stats.Mode = nil + assert.Equal(t, "\"cpu\":1.2,\"mem\":123,\"fd\":456,\"th\":789,\"lat\":1234,\"load\":5678,\"code\":200,\"ifs\":1,\"ric\":0,\"rt\":\"EC2M\"", getAgentStats(stats)) + + stats.RegionType = nil + assert.Equal(t, "\"cpu\":1.2,\"mem\":123,\"fd\":456,\"th\":789,\"lat\":1234,\"load\":5678,\"code\":200,\"ifs\":1,\"ric\":0", getAgentStats(stats)) + + stats.RunInContainer = nil assert.Equal(t, "\"cpu\":1.2,\"mem\":123,\"fd\":456,\"th\":789,\"lat\":1234,\"load\":5678,\"code\":200,\"ifs\":1", getAgentStats(stats)) stats.ImdsFallbackSucceed = nil diff --git a/internal/version/version.go b/internal/version/version.go new file mode 100644 index 0000000000..910fd90390 --- /dev/null +++ b/internal/version/version.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package version + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + filename = "CWAGENT_VERSION" + unknownVersion = "Unknown" +) + +var ( + version = readVersionFile() + fullVersion = buildFullVersion(version) +) + +func Number() string { + return version +} + +func Full() string { + return fullVersion +} + +func FilePath() (string, error) { + ex, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Join(filepath.Dir(ex), filename), nil +} + +func buildFullVersion(version string) string { + return fmt.Sprintf("CWAgent/%s (%s; %s; %s)", + version, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH) +} + +func readVersionFile() string { + versionFilePath, err := FilePath() + if err != nil { + return unknownVersion + } + content, err := os.ReadFile(versionFilePath) + if err != nil { + return unknownVersion + } + return strings.Trim(string(content), " \n\r\t") +} diff --git a/internal/version/version_test.go 
b/internal/version/version_test.go new file mode 100644 index 0000000000..554eee2a63 --- /dev/null +++ b/internal/version/version_test.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package version + +import ( + "fmt" + "os" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestVersion(t *testing.T) { + expectedVersion := "Unknown" + expectedFullVersion := fmt.Sprintf("CWAgent/%s (%s; %s; %s)", + expectedVersion, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH) + assert.Equal(t, expectedVersion, Number()) + assert.Equal(t, expectedFullVersion, Full()) + + expectedVersion = "TEST_VERSION" + expectedFullVersion = fmt.Sprintf("CWAgent/%s (%s; %s; %s)", + expectedVersion, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH) + filePath, err := FilePath() + require.NoError(t, err) + err = os.WriteFile(filePath, []byte(expectedVersion), 0644) + require.NoError(t, err) + t.Cleanup(func() { + _ = os.Remove(filePath) + }) + + actualVersion := readVersionFile() + assert.Equal(t, expectedVersion, actualVersion) + assert.Equal(t, expectedFullVersion, buildFullVersion(actualVersion)) +} diff --git a/plugins/inputs/logfile/logfile_test.go b/plugins/inputs/logfile/logfile_test.go index f5a38ab48c..2c1a2e98dd 100644 --- a/plugins/inputs/logfile/logfile_test.go +++ b/plugins/inputs/logfile/logfile_test.go @@ -453,7 +453,7 @@ func createWriteRead(t *testing.T, prefix string, logFile *LogFile, done chan bo select { case <-done2: t.Log("Child completed before timeout (as expected)") - case <-time.After(time.Second * 10): + case <-time.After(time.Second * 20): require.Fail(t, "timeout waiting for child") } t.Log("Verify 1st temp file was auto deleted.") diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index f4d14cdb74..e936a72622 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ 
b/plugins/outputs/cloudwatch/cloudwatch.go @@ -11,6 +11,7 @@ import ( "sync" "time" + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatch" @@ -22,11 +23,12 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" "golang.org/x/exp/maps" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" "github.com/aws/amazon-cloudwatch-agent/handlers" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/internal/publisher" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" @@ -54,6 +56,7 @@ const ( type CloudWatch struct { config *Config + logger *zap.Logger svc cloudwatchiface.CloudWatchAPI // todo: may want to increase the size of the chan since the type changed. // 1 telegraf Metric could have many Fields. 
@@ -69,7 +72,6 @@ type CloudWatch struct { aggregator Aggregator aggregatorShutdownChan chan struct{} aggregatorWaitGroup sync.WaitGroup - agentInfo agentinfo.AgentInfo lastRequestBytes int } @@ -81,7 +83,6 @@ func (c *CloudWatch) Capabilities() consumer.Capabilities { } func (c *CloudWatch) Start(_ context.Context, host component.Host) error { - c.agentInfo = agentinfo.New("") c.publisher, _ = publisher.NewPublisher( publisher.NewNonBlockingFifoQueue(metricChanBufferSize), maxConcurrentPublisher, @@ -96,6 +97,8 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { Filename: c.config.SharedCredentialFilename, Token: c.config.Token, } + provider.GetFlagsStats().SetFlagWithValue(provider.FlagRegionType, c.config.RegionType) + provider.GetFlagsStats().SetFlagWithValue(provider.FlagMode, c.config.Mode) configProvider := credentialConfig.Credentials() logger := models.NewLogger("outputs", "cloudwatch", "") logThrottleRetryer := retryer.NewLogThrottleRetryer(logger) @@ -108,8 +111,9 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { Logger: configaws.SDKLogger{}, }) svc.Handlers.Build.PushBackNamed(handlers.NewRequestCompressionHandler([]string{opPutLogEvents, opPutMetricData})) - svc.Handlers.Build.PushBackNamed(handlers.NewCustomHeaderHandler("User-Agent", c.agentInfo.UserAgent())) - svc.Handlers.Build.PushBackNamed(handlers.NewDynamicCustomHeaderHandler("X-Amz-Agent-Stats", c.agentInfo.StatsHeader)) + if c.config.MiddlewareID != nil { + awsmiddleware.TryConfigure(c.logger, host, *c.config.MiddlewareID, awsmiddleware.SDKv1(&svc.Handlers)) + } //Format unique roll up list c.config.RollupDimensions = GetUniqueRollupList(c.config.RollupDimensions) c.svc = svc @@ -267,7 +271,7 @@ func (c *CloudWatch) publish() { if !bufferFullOccurred { // Set to true so this only happens once per push. bufferFullOccurred = true - // Keep interval above above 1 second. + // Keep interval above 1 second. 
if currentInterval.Seconds() > 1 { currentInterval /= 2 if currentInterval.Seconds() < 1 { @@ -341,9 +345,7 @@ func (c *CloudWatch) WriteToCloudWatch(req interface{}) { } var err error for i := 0; i < defaultRetryCount; i++ { - startTime := time.Now() _, err = c.svc.PutMetricData(params) - c.agentInfo.RecordOpData(time.Since(startTime), c.lastRequestBytes, err) if err != nil { awsErr, ok := err.(awserr.Error) if !ok { diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 7e74dc7fae..79aa8a5c25 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -7,11 +7,14 @@ import ( "context" "log" "math" + "net/http" + "net/http/httptest" "strconv" "strings" "testing" "time" + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatch" @@ -21,8 +24,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/internal/publisher" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" ) @@ -382,7 +386,6 @@ func newCloudWatchClient( MaxDatumsPerCall: defaultMaxDatumsPerCall, MaxValuesPerDatum: defaultMaxValuesPerDatum, }, - agentInfo: agentinfo.New(""), } cloudwatch.startRoutines() return cloudwatch @@ -495,6 +498,45 @@ func TestPublish(t *testing.T) { cw.Shutdown(ctx) } +func TestMiddleware(t *testing.T) { + t.Setenv("AWS_ACCESS_KEY_ID", "test") + t.Setenv("AWS_SECRET_ACCESS_KEY", "test") + id := component.NewID("test") + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + cw := 
&CloudWatch{ + config: &Config{ + Region: "test-region", + Namespace: "test-namespace", + ForceFlushInterval: time.Second, + EndpointOverride: server.URL, + MiddlewareID: &id, + }, + logger: zap.NewNop(), + } + ctx := context.Background() + handler := new(awsmiddleware.MockHandler) + handler.On("ID").Return("test") + handler.On("Position").Return(awsmiddleware.After) + handler.On("HandleRequest", mock.Anything, mock.Anything) + handler.On("HandleResponse", mock.Anything, mock.Anything) + middleware := new(awsmiddleware.MockMiddlewareExtension) + middleware.On("Handlers").Return([]awsmiddleware.RequestHandler{handler}, []awsmiddleware.ResponseHandler{handler}) + extensions := map[component.ID]component.Component{id: middleware} + host := new(awsmiddleware.MockExtensionsHost) + host.On("GetExtensions").Return(extensions) + assert.NoError(t, cw.Start(ctx, host)) + // Expect 1500 metrics batched in 2 API calls. + pmetrics := createTestMetrics(1500, 1, 1, "B/s") + assert.NoError(t, cw.ConsumeMetrics(ctx, pmetrics)) + time.Sleep(2*time.Second + 2*cw.config.ForceFlushInterval) + handler.AssertCalled(t, "HandleRequest", mock.Anything, mock.Anything) + handler.AssertCalled(t, "HandleResponse", mock.Anything, mock.Anything) + require.NoError(t, cw.Shutdown(ctx)) +} + func TestBackoffRetries(t *testing.T) { c := &CloudWatch{} sleeps := []time.Duration{ diff --git a/plugins/outputs/cloudwatch/config.go b/plugins/outputs/cloudwatch/config.go index 0790b54216..43469bc9d5 100644 --- a/plugins/outputs/cloudwatch/config.go +++ b/plugins/outputs/cloudwatch/config.go @@ -11,13 +11,15 @@ import ( "go.opentelemetry.io/collector/component" ) -// Config represent a configuration for the CloudWatch logs exporter. +// Config represent a configuration for the CloudWatch metrics exporter. 
type Config struct { Region string `mapstructure:"region"` EndpointOverride string `mapstructure:"endpoint_override,omitempty"` AccessKey string `mapstructure:"access_key,omitempty"` SecretKey string `mapstructure:"secret_key,omitempty"` RoleARN string `mapstructure:"role_arn,omitempty"` + RegionType string `mapstructure:"region_type,omitempty"` + Mode string `mapstructure:"mode,omitempty"` Profile string `mapstructure:"profile,omitempty"` SharedCredentialFilename string `mapstructure:"shared_credential_file,omitempty"` Token string `mapstructure:"token,omitempty"` @@ -33,6 +35,8 @@ type Config struct { // "Enabled" - A boolean field to enable/disable this option. Default is `false`. // If enabled, all the resource attributes will be converted to metric labels by default. ResourceToTelemetrySettings resourcetotelemetry.Settings `mapstructure:"resource_to_telemetry_conversion"` + // MiddlewareID is an ID for an extension that can be used to configure the AWS client. + MiddlewareID *component.ID `mapstructure:"middleware,omitempty"` } var _ component.Config = (*Config)(nil) diff --git a/plugins/outputs/cloudwatch/factory.go b/plugins/outputs/cloudwatch/factory.go index 1e5048ed70..f1a554b703 100644 --- a/plugins/outputs/cloudwatch/factory.go +++ b/plugins/outputs/cloudwatch/factory.go @@ -43,20 +43,21 @@ func createMetricsExporter( settings exporter.CreateSettings, config component.Config, ) (exporter.Metrics, error) { - exp := &CloudWatch{ + cw := &CloudWatch{ config: config.(*Config), + logger: settings.Logger, } - exporter, err := exporterhelper.NewMetricsExporter( + exp, err := exporterhelper.NewMetricsExporter( ctx, settings, config, - exp.ConsumeMetrics, - exporterhelper.WithStart(exp.Start), - exporterhelper.WithShutdown(exp.Shutdown), + cw.ConsumeMetrics, + exporterhelper.WithStart(cw.Start), + exporterhelper.WithShutdown(cw.Shutdown), ) if err != nil { return nil, err } return resourcetotelemetry.WrapMetricsExporter( - 
config.(*Config).ResourceToTelemetrySettings, exporter), nil + config.(*Config).ResourceToTelemetrySettings, exp), nil } diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 0cf6c93242..fb04338dc7 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -6,19 +6,26 @@ package cloudwatchlogs import ( "encoding/json" "fmt" + "regexp" "strings" "sync" "time" + "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" + "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" "github.com/aws/amazon-cloudwatch-agent/handlers" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/logs" @@ -31,7 +38,7 @@ const ( LogEntryField = "value" defaultFlushTimeout = 5 * time.Second - eventHeaderSize = 26 + eventHeaderSize = 200 truncatedSuffix = "[Truncated...]" msgSizeLimit = 256*1024 - eventHeaderSize @@ -41,8 +48,14 @@ const ( attributesInFields = "attributesInFields" ) +var ( + containerInsightsRegexp = regexp.MustCompile("^/aws/.*containerinsights/.*/(performance|prometheus)$") +) + type CloudWatchLogs struct { Region string `toml:"region"` + RegionType string 
`toml:"region_type"` + Mode string `toml:"mode"` EndpointOverride string `toml:"endpoint_override"` AccessKey string `toml:"access_key"` SecretKey string `toml:"secret_key"` @@ -65,6 +78,7 @@ type CloudWatchLogs struct { pusherStopChan chan struct{} pusherWaitGroup sync.WaitGroup cwDests map[Target]*cwDest + middleware awsmiddleware.Middleware } func (c *CloudWatchLogs) Connect() error { @@ -133,12 +147,20 @@ func (c *CloudWatchLogs) getDest(t Target) *cwDest { Logger: configaws.SDKLogger{}, }, ) - agentInfo := agentinfo.New(t.Group) + provider.GetFlagsStats().SetFlagWithValue(provider.FlagRegionType, c.RegionType) + provider.GetFlagsStats().SetFlagWithValue(provider.FlagMode, c.Mode) + if containerInsightsRegexp.MatchString(t.Group) { + useragent.Get().SetContainerInsightsFlag() + } client.Handlers.Build.PushBackNamed(handlers.NewRequestCompressionHandler([]string{"PutLogEvents"})) - client.Handlers.Build.PushBackNamed(handlers.NewCustomHeaderHandler("User-Agent", agentInfo.UserAgent())) - client.Handlers.Build.PushBackNamed(handlers.NewDynamicCustomHeaderHandler("X-Amz-Agent-Stats", agentInfo.StatsHeader)) - - pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, agentInfo) + if c.middleware != nil { + if err := awsmiddleware.NewConfigurer(c.middleware.Handlers()).Configure(awsmiddleware.SDKv1(&client.Handlers)); err != nil { + c.Log.Errorf("Unable to configure middleware on cloudwatch logs client: %v", err) + } else { + c.Log.Info("Configured middleware on AWS client") + } + } + pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup) cwd := &cwDest{pusher: pusher, retryer: logThrottleRetryer} c.cwDests[t] = cwd return cwd @@ -375,6 +397,13 @@ func init() { ForceFlushInterval: internal.Duration{Duration: defaultFlushTimeout}, pusherStopChan: make(chan struct{}), cwDests: make(map[Target]*cwDest), + middleware: 
agenthealth.NewAgentHealth( + zap.NewNop(), + &agenthealth.Config{ + IsUsageDataEnabled: envconfig.IsUsageDataEnabled(), + Stats: agent.StatsConfig{Operations: []string{"PutLogEvents"}}, + }, + ), } }) } diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 83b8f55282..1651f18cdb 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -14,7 +14,6 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/profiler" ) @@ -60,10 +59,9 @@ type pusher struct { initNonBlockingChOnce sync.Once startNonBlockCh chan struct{} wg *sync.WaitGroup - agentInfo agentinfo.AgentInfo } -func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, agentInfo agentinfo.AgentInfo) *pusher { +func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup) *pusher { p := &pusher{ Target: target, Service: service, @@ -76,7 +74,6 @@ func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.D stop: stop, startNonBlockCh: make(chan struct{}), wg: wg, - agentInfo: agentInfo, } p.putRetentionPolicy() p.wg.Add(1) @@ -232,9 +229,7 @@ func (p *pusher) send() { retryCount := 0 for { input.SequenceToken = p.sequenceToken - opStartTime := time.Now() output, err := p.Service.PutLogEvents(input) - p.agentInfo.RecordOpData(time.Since(opStartTime), p.bufferredSize, err) if err == nil { if output.NextSequenceToken != nil { p.sequenceToken = output.NextSequenceToken diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go 
index 551f27e8ea..4fc93c8ca2 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -20,8 +20,6 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/require" - - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" ) var wg sync.WaitGroup @@ -765,6 +763,6 @@ func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { func testPreparation(retention int, s *svcMock, flushTimeout time.Duration, retryDuration time.Duration) (chan struct{}, *pusher) { stop := make(chan struct{}) - p := NewPusher(Target{"G", "S", retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, agentinfo.New("")) + p := NewPusher(Target{"G", "S", retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg) return stop, p } diff --git a/plugins/processors/ec2tagger/ec2metadataprovider.go b/plugins/processors/ec2tagger/ec2metadataprovider.go index 04f66a1c9e..b90258605a 100644 --- a/plugins/processors/ec2tagger/ec2metadataprovider.go +++ b/plugins/processors/ec2tagger/ec2metadataprovider.go @@ -12,7 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" ) @@ -52,7 +52,7 @@ func (c *metadataClient) InstanceID(ctx context.Context) (string, error) { log.Printf("D! 
could not get instance id without imds v1 fallback enable thus enable fallback") instanceInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "instance-id") if errorInner == nil { - agentinfo.SetImdsFallbackSucceed() + provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) } return instanceInner, errorInner } @@ -65,7 +65,7 @@ func (c *metadataClient) Hostname(ctx context.Context) (string, error) { log.Printf("D! could not get hostname without imds v1 fallback enable thus enable fallback") hostnameInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "hostname") if errorInner == nil { - agentinfo.SetImdsFallbackSucceed() + provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) } return hostnameInner, errorInner } @@ -78,7 +78,7 @@ func (c *metadataClient) Get(ctx context.Context) (ec2metadata.EC2InstanceIdenti log.Printf("D! could not get instance document without imds v1 fallback enable thus enable fallback") instanceDocumentInner, errorInner := c.metadataFallbackEnabled.GetInstanceIdentityDocumentWithContext(ctx) if errorInner == nil { - agentinfo.SetImdsFallbackSucceed() + provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) } return instanceDocumentInner, errorInner } diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 0df6e4818d..d6302773cc 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -61,7 +62,9 @@ func Factories() (otelcol.Factories, error) { return otelcol.Factories{}, err } - if factories.Extensions, err = extension.MakeFactoryMap(); err != nil { + if 
factories.Extensions, err = extension.MakeFactoryMap( + agenthealth.NewFactory(), + ); err != nil { return otelcol.Factories{}, err } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 01f93fbd9f..2d403603aa 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -13,7 +13,7 @@ const ( receiversCount = 5 processorCount = 5 exportersCount = 5 - extensionsCount = 0 + extensionsCount = 1 ) func TestComponents(t *testing.T) { @@ -45,4 +45,5 @@ func TestComponents(t *testing.T) { extensions := factories.Extensions assert.Len(t, extensions, extensionsCount) + assert.NotNil(t, extensions["agenthealth"]) } diff --git a/translator/config/envconst.go b/translator/config/envconst.go index dd757b301b..ed1785c138 100644 --- a/translator/config/envconst.go +++ b/translator/config/envconst.go @@ -3,17 +3,19 @@ package config +import "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + const ( - RUN_IN_CONTAINER = "RUN_IN_CONTAINER" - RUN_IN_CONTAINER_TRUE = "True" - RUN_IN_AWS = "RUN_IN_AWS" - RUN_IN_AWS_TRUE = "True" - RUN_WITH_IRSA = "RUN_WITH_IRSA" - RUN_WITH_IRSA_TRUE = "True" - USE_DEFAULT_CONFIG = "USE_DEFAULT_CONFIG" - USE_DEFAULT_CONFIG_TRUE = "True" - HOST_NAME = "HOST_NAME" - POD_NAME = "POD_NAME" - HOST_IP = "HOST_IP" - CWConfigContent = "CW_CONFIG_CONTENT" + RUN_IN_CONTAINER = envconfig.RunInContainer + RUN_IN_CONTAINER_TRUE = envconfig.TrueValue + RUN_IN_AWS = envconfig.RunInAWS + RUN_IN_AWS_TRUE = envconfig.TrueValue + RUN_WITH_IRSA = envconfig.RunWithIRSA + RUN_WITH_IRSA_TRUE = envconfig.TrueValue + USE_DEFAULT_CONFIG = envconfig.UseDefaultConfig + USE_DEFAULT_CONFIG_TRUE = envconfig.TrueValue + HOST_NAME = envconfig.HostName + POD_NAME = envconfig.PodName + HOST_IP = envconfig.HostIP + CWConfigContent = envconfig.CWConfigContent ) diff --git a/translator/context/context.go b/translator/context/context.go index f4b88a4f4b..d39daff130 100644 --- 
a/translator/context/context.go +++ b/translator/context/context.go @@ -7,6 +7,7 @@ import ( "log" "os" + "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -41,6 +42,7 @@ type Context struct { multiConfig string outputTomlFilePath string mode string + shortMode string credentials map[string]string proxy map[string]string ssl map[string]string @@ -96,6 +98,10 @@ func (ctx *Context) Mode() string { return ctx.mode } +func (ctx *Context) ShortMode() string { + return ctx.shortMode +} + func (ctx *Context) Credentials() map[string]string { return ctx.credentials } @@ -112,12 +118,16 @@ func (ctx *Context) SetMode(mode string) { switch mode { case config.ModeEC2: ctx.mode = config.ModeEC2 + ctx.shortMode = agentinfo.ModeEC2 case config.ModeOnPrem: ctx.mode = config.ModeOnPrem + ctx.shortMode = agentinfo.ModeOnPrem case config.ModeOnPremise: ctx.mode = config.ModeOnPremise + ctx.shortMode = agentinfo.ModeOnPrem case config.ModeWithIRSA: ctx.mode = config.ModeWithIRSA + ctx.shortMode = agentinfo.ModeWithIRSA default: log.Panicf("Invalid mode %s. 
Valid mode values are %s, %s, %s and %s.", mode, config.ModeEC2, config.ModeOnPrem, config.ModeOnPremise, config.ModeWithIRSA) } diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index bb7860feb2..5f5b59c376 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -1,102 +1,112 @@ connectors: {} exporters: - awscloudwatch: - force_flush_interval: 1m0s - max_datums_per_call: 1000 - max_values_per_datum: 150 - namespace: CWAgent - region: us-west-2 - resource_to_telemetry_conversion: - enabled: true -extensions: {} + awscloudwatch: + force_flush_interval: 1m0s + max_datums_per_call: 1000 + max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 + namespace: CWAgent + region: us-west-2 + region_type: ACJ + resource_to_telemetry_conversion: + enabled: true +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: - cumulativetodelta/hostDeltaMetrics: - exclude: - match_type: strict - metrics: - - iops_in_progress - - diskio_iops_in_progress - regexp: null - include: - match_type: "" - metrics: [] - regexp: null - initial_value: 0 - max_staleness: 0s - ec2tagger: - ec2_instance_tag_keys: - - AutoScalingGroupName - ec2_metadata_tags: - - ImageId - - InstanceId - - InstanceType - refresh_interval_seconds: 0s - imds_retries: 1 + cumulativetodelta/hostDeltaMetrics: + exclude: + match_type: strict + metrics: + - iops_in_progress + - diskio_iops_in_progress + regexp: null + include: + match_type: "" + metrics: [] + regexp: null + initial_value: 0 + max_staleness: 0s + ec2tagger: + ec2_instance_tag_keys: + - AutoScalingGroupName + ec2_metadata_tags: + - ImageId + - InstanceId + - InstanceType + imds_retries: 1 + refresh_interval_seconds: 0s receivers: - telegraf_cpu: - collection_interval: 1m0s - initial_delay: 1s - 
timeout: 0s - telegraf_disk: - collection_interval: 1m0s - initial_delay: 1s - timeout: 0s - telegraf_diskio: - collection_interval: 1m0s - initial_delay: 1s - timeout: 0s - telegraf_mem: - collection_interval: 1m0s - initial_delay: 1s - timeout: 0s - telegraf_netstat: - collection_interval: 1m0s - initial_delay: 1s - timeout: 0s - telegraf_swap: - collection_interval: 1m0s - initial_delay: 1s - timeout: 0s + telegraf_cpu: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_disk: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_diskio: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_mem: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_netstat: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_swap: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s service: - extensions: [] - pipelines: - metrics/host: - exporters: - - awscloudwatch - processors: - - ec2tagger - receivers: - - telegraf_cpu - - telegraf_disk - - telegraf_mem - - telegraf_netstat - - telegraf_swap - metrics/hostDeltaMetrics: - exporters: - - awscloudwatch - processors: - - cumulativetodelta/hostDeltaMetrics - - ec2tagger - receivers: - - telegraf_diskio - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/metrics + pipelines: + metrics/host: + exporters: + - awscloudwatch + processors: + - ec2tagger + receivers: + - telegraf_mem + - telegraf_netstat + - telegraf_swap + - telegraf_cpu + - telegraf_disk + metrics/hostDeltaMetrics: + exporters: + - awscloudwatch + processors: + - 
cumulativetodelta/hostDeltaMetrics + - ec2tagger + receivers: + - telegraf_diskio + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index c67bce7610..98bf371b53 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -27,46 +35,47 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s receivers: telegraf_cpu: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_ethtool: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_netstat: 
collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_nvidia_smi: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_swap: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -74,13 +83,13 @@ service: processors: - ec2tagger receivers: - - telegraf_cpu - telegraf_disk - - telegraf_ethtool - telegraf_mem - telegraf_netstat - - telegraf_nvidia_smi - telegraf_swap + - telegraf_ethtool + - telegraf_nvidia_smi + - telegraf_cpu metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -98,7 +107,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 2226ca01c1..904ab8de4d 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: @@ -17,46 +25,47 @@ processors: - ImageId - InstanceId - InstanceType - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s receivers: telegraf_win_perf_counters/1492679118: - alias_name : Memory + alias_name: Memory collection_interval: 1s - initial_delay: 
"1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/2039663244: - alias_name : TCPv6 + alias_name: TCPv6 collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/2073218482: - alias_name : TCPv4 + alias_name: TCPv4 collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3446270237: - alias_name : PhysicalDisk + alias_name: PhysicalDisk collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3610923661: - alias_name : Paging File + alias_name: Paging File collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3762679655: - alias_name : Processor + alias_name: Processor collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/4283769065: - alias_name : LogicalDisk + alias_name: LogicalDisk collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -64,13 +73,13 @@ service: processors: - ec2tagger receivers: - - telegraf_win_perf_counters/1492679118 - - telegraf_win_perf_counters/2039663244 - telegraf_win_perf_counters/3446270237 - - telegraf_win_perf_counters/2073218482 - - telegraf_win_perf_counters/3610923661 - telegraf_win_perf_counters/3762679655 + - telegraf_win_perf_counters/2073218482 + - telegraf_win_perf_counters/2039663244 - telegraf_win_perf_counters/4283769065 + - telegraf_win_perf_counters/1492679118 + - telegraf_win_perf_counters/3610923661 telemetry: logs: development: false @@ -80,7 +89,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log] + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 
diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index 94c18cc7e7..489d6d36e6 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -1,233 +1,270 @@ connectors: {} exporters: - awscloudwatchlogs/emf_logs: - certificate_file_path: "/etc/test/ca_bundle.pem" - emf_only: true - endpoint: "https://fake_endpoint" - imds_retries: 1 - local_mode: false - log_group_name: emf/logs/default - log_retention: 0 - log_stream_name: host_name_from_env - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - proxy_address: "" - raw_log: true - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - retry_on_failure: - enabled: true - initial_interval: 5s - max_elapsed_time: 5m0s - max_interval: 30s - multiplier: 1.5 - randomization_factor: 0.5 - role_arn: "" - sending_queue: - queue_size: 1000 - profile: "" - shared_credentials_file: [] - awsemf/containerinsights: - namespace: ContainerInsights - log_group_name: '/aws/containerinsights/{ClusterName}/performance' - log_stream_name: '{NodeName}' - log_retention: 0 - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: true - version: "0" - eks_fargate_container_insights_enabled: false - certificate_file_path: "/etc/test/ca_bundle.pem" - endpoint: "https://fake_endpoint" - enhanced_container_insights: false - imds_retries: 1 - local_mode: false - retain_initial_value_of_delta_metric: false - parse_json_encoded_attr_values: [ Sources, kubernetes ] - max_retries: 2 - resource_to_telemetry_conversion: - enabled: true - metric_declarations: - # pod metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_cpu_utilization - - 
pod_memory_utilization - - pod_network_rx_bytes - - pod_network_tx_bytes - - pod_cpu_utilization_over_pod_limit - - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_number_of_container_restarts - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_cpu_reserved_capacity - - pod_memory_reserved_capacity - # node metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_cpu_utilization - - node_memory_utilization - - node_network_total_bytes - - node_cpu_reserved_capacity - - node_memory_reserved_capacity - - node_number_of_running_pods - - node_number_of_running_containers - - dimensions: [ [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_cpu_usage_total - - node_cpu_limit - - node_memory_working_set - - node_memory_limit - # node fs metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_filesystem_utilization - # service metrics - - dimensions: [ [ ClusterName, Namespace, Service ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - service_number_of_running_pods - # namespace metrics - - dimensions: [ [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - namespace_number_of_running_pods - # cluster metrics - - dimensions: [ [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - cluster_node_count - - cluster_failed_node_count - metric_descriptors: [ ] - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - proxy_address: "" - region: "us-east-1" - request_timeout_seconds: 30 - resource_arn: "" - role_arn: "" - profile: "" - shared_credentials_file: [ ] -extensions: {} + awscloudwatchlogs/emf_logs: + 
certificate_file_path: /etc/test/ca_bundle.pem + emf_only: true + endpoint: https://fake_endpoint + imds_retries: 1 + local_mode: false + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: host_name_from_env + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + raw_log: true + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + role_arn: "" + sending_queue: + queue_size: 1000 + shared_credentials_file: [] + awsemf/containerinsights: + certificate_file_path: /etc/test/ca_bundle.pem + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + label_matchers: [] + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - pod_number_of_container_restarts + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + 
metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + label_matchers: [] + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + metric_descriptors: [] + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - batch/emf_logs: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s - batch/containerinsights: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + 
send_batch_size: 8192 + timeout: 5s + batch/emf_logs: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s receivers: - awscontainerinsightreceiver: - add_container_name_metric_label: false - add_full_pod_name_metric_label: false - add_service_as_attribute: true - cluster_name: TestCluster - collection_interval: 30s - container_orchestrator: eks - enable_control_plane_metrics: false - certificate_file_path: "" - endpoint: "" - imds_retries: 1 - prefer_full_pod_name: true - leader_lock_name: cwagent-clusterleader - leader_lock_using_config_map_only: true - local_mode: false - max_retries: 0 - no_verify_ssl: false - num_workers: 0 - proxy_address: "" - region: "us-east-1" - request_timeout_seconds: 0 - resource_arn: "" - role_arn: "" - profile: "" - shared_credentials_file: [ ] - tcplog/emf_logs: - attributes: {} - encoding: utf-8 - id: tcp_input - listen_address: 0.0.0.0:25888 - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: tcp_input - udplog/emf_logs: - attributes: {} - encoding: utf-8 - id: udp_input - listen_address: 0.0.0.0:25888 - multiline: - line_end_pattern: .^ - line_start_pattern: "" - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: udp_input + awscontainerinsightreceiver: + add_container_name_metric_label: false + add_full_pod_name_metric_label: false + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: false + endpoint: "" + imds_retries: 1 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: true + 
profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + tcplog/emf_logs: + attributes: {} + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: tcp_input + udplog/emf_logs: + attributes: {} + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: udp_input service: - extensions: [] - pipelines: - logs/emf_logs: - exporters: - - awscloudwatchlogs/emf_logs - processors: - - batch/emf_logs - receivers: - - tcplog/emf_logs - - udplog/emf_logs - metrics/containerinsights: - exporters: - - awsemf/containerinsights - processors: - - batch/containerinsights - receivers: - - awscontainerinsightreceiver - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] \ No newline at end of file + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + 
initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 676f41c6ad..a38a6daae9 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -4,32 +4,41 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-east-1 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 refresh_interval_seconds: 0s receivers: telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -37,8 +46,8 @@ service: processors: - ec2tagger receivers: - - telegraf_disk - telegraf_mem + - telegraf_disk telemetry: logs: development: false @@ -48,7 +57,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 851790b6e4..1a1b36fffe 100644 --- 
a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: @@ -17,21 +25,22 @@ processors: - ImageId - InstanceId - InstanceType - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s receivers: telegraf_win_perf_counters/1492679118: - alias_name : Memory + alias_name: Memory collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/4283769065: - alias_name : LogicalDisk + alias_name: LogicalDisk collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -50,7 +59,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log] + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 420f130b3a..c79e2caa78 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -4,19 +4,28 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ 
resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: {} receivers: telegraf_socket_listener: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -33,7 +42,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf index f46174f758..dd69467e71 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf @@ -126,4 +126,6 @@ force_flush_interval = "60s" log_stream_name = "LOG_STREAM_NAME" region = "us-west-2" + region_type = "ACJ" + mode = "EC2" role_arn = "log_role_arn_value_test" diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 5c6d95a9d9..532da43e87 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -5,8 +5,11 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 5000 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -17,20 +20,22 @@ exporters: - - d1 - [] awscloudwatchlogs/emf_logs: - emf_only: true certificate_file_path: "" - endpoint: "https://logs-fips.us-west-2.amazonaws.com" + emf_only: true + endpoint: 
https://logs-fips.us-west-2.amazonaws.com imds_retries: 1 local_mode: false log_group_name: emf/logs/default log_retention: 0 log_stream_name: LOG_STREAM_NAME max_retries: 2 + middleware: agenthealth/logs no_verify_ssl: false num_workers: 8 + profile: "" proxy_address: "" raw_log: true - region: "us-west-2" + region: us-west-2 request_timeout_seconds: 30 resource_arn: "" retry_on_failure: @@ -43,8 +48,7 @@ exporters: role_arn: log_role_arn_value_test sending_queue: queue_size: 1000 - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] awsxray: aws_log_groups: [] certificate_file_path: "" @@ -54,19 +58,35 @@ exporters: indexed_attributes: [] local_mode: true max_retries: 2 + middleware: agenthealth/traces no_verify_ssl: true num_workers: 1 + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] telemetry: enabled: true include_metadata: true -extensions: {} +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -97,11 +117,11 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s transform: error_mode: propagate log_statements: [] @@ -126,13 +146,13 @@ receivers: role_arn: trace_role_arn_value_test tls: ca_file: "" - ca_pem: "[REDACTED]" + ca_pem: '[REDACTED]' cert_file: "" - cert_pem: "[REDACTED]" + cert_pem: '[REDACTED]' insecure: true insecure_skip_verify: false key_file: "" - key_pem: "[REDACTED]" + 
key_pem: '[REDACTED]' max_version: "" min_version: "" reload_interval: 0s @@ -164,48 +184,48 @@ receivers: traces_url_path: /v1/traces telegraf_cpu: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_net: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_netstat: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_processes: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_procstat/1917393364: alias_name: /var/run/example1.pid collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_socket_listener: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_statsd: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_swap: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s udplog/emf_logs: attributes: {} @@ -226,7 +246,10 @@ receivers: storage: null type: udp_input service: - extensions: [] + extensions: + - agenthealth/metrics + - agenthealth/logs + - agenthealth/traces pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.conf b/translator/tocwconfig/sampleConfig/complete_linux_config.conf index c5f2b4f9c3..8cc4d07b61 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.conf @@ -126,4 +126,6 @@ force_flush_interval = "60s" log_stream_name = "LOG_STREAM_NAME" region = "us-west-2" + region_type = "ACJ" + mode = "EC2" role_arn = "log_role_arn_value_test" diff --git 
a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index d55c341d43..257432f14c 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -8,8 +8,11 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 5000 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -20,20 +23,22 @@ exporters: - - d1 - [] awscloudwatchlogs/emf_logs: - emf_only: true certificate_file_path: "" - endpoint: "https://logs-fips.us-west-2.amazonaws.com" + emf_only: true + endpoint: https://logs-fips.us-west-2.amazonaws.com imds_retries: 1 local_mode: false log_group_name: emf/logs/default log_retention: 0 log_stream_name: LOG_STREAM_NAME max_retries: 2 + middleware: agenthealth/logs no_verify_ssl: false num_workers: 8 + profile: "" proxy_address: "" raw_log: true - region: "us-west-2" + region: us-west-2 request_timeout_seconds: 30 resource_arn: "" retry_on_failure: @@ -46,8 +51,7 @@ exporters: role_arn: log_role_arn_value_test sending_queue: queue_size: 1000 - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] awsxray: aws_log_groups: [] certificate_file_path: "" @@ -57,19 +61,35 @@ exporters: indexed_attributes: [] local_mode: true max_retries: 2 + middleware: agenthealth/traces no_verify_ssl: true num_workers: 1 + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] telemetry: enabled: true include_metadata: true -extensions: {} +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + 
agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -103,19 +123,19 @@ processors: - ImageId - InstanceId - InstanceType - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s transform: error_mode: propagate log_statements: [] metric_statements: - context: metric statements: - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" trace_statements: [] receivers: awsxray: @@ -129,13 +149,13 @@ receivers: role_arn: trace_role_arn_value_test tls: ca_file: "" - ca_pem: "[REDACTED]" + ca_pem: '[REDACTED]' cert_file: "" - cert_pem: "[REDACTED]" + cert_pem: '[REDACTED]' insecure: true insecure_skip_verify: false key_file: "" - key_pem: "[REDACTED]" + key_pem: '[REDACTED]' max_version: "" min_version: "" reload_interval: 0s @@ -167,48 +187,48 @@ receivers: traces_url_path: /v1/traces telegraf_cpu: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_net: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_netstat: 
collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_processes: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_procstat/1917393364: alias_name: /var/run/example1.pid collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_socket_listener: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_statsd: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_swap: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s udplog/emf_logs: attributes: {} @@ -229,7 +249,10 @@ receivers: storage: null type: udp_input service: - extensions: [] + extensions: + - agenthealth/metrics + - agenthealth/logs + - agenthealth/traces pipelines: logs/emf_logs: exporters: @@ -245,15 +268,15 @@ service: - ec2tagger - transform receivers: - - telegraf_procstat/1917393364 - telegraf_mem - - telegraf_cpu - telegraf_disk - - telegraf_swap - - telegraf_statsd - telegraf_socket_listener - - telegraf_processes + - telegraf_cpu + - telegraf_procstat/1917393364 + - telegraf_swap - telegraf_netstat + - telegraf_processes + - telegraf_statsd metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -262,8 +285,8 @@ service: - ec2tagger - transform receivers: - - telegraf_diskio - telegraf_net + - telegraf_diskio traces/xray: exporters: - awsxray diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.conf b/translator/tocwconfig/sampleConfig/complete_windows_config.conf index 26c8cf5cfb..9d06e7e62b 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.conf @@ -144,4 +144,6 @@ force_flush_interval = "60s" log_stream_name = "LOG_STREAM_NAME" region = "us-west-2" + region_type = "ACJ" + mode = "EC2" role_arn = "log_role_arn_value_test" diff --git 
a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 284b6113f2..93872cc138 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -5,8 +5,11 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 5000 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -19,18 +22,20 @@ exporters: awscloudwatchlogs/emf_logs: certificate_file_path: "" emf_only: true - endpoint: "https://logs-fips.us-west-2.amazonaws.com" + endpoint: https://logs-fips.us-west-2.amazonaws.com imds_retries: 1 local_mode: false log_group_name: emf/logs/default log_retention: 0 log_stream_name: LOG_STREAM_NAME max_retries: 2 + middleware: agenthealth/logs no_verify_ssl: false num_workers: 8 + profile: "" proxy_address: "" raw_log: true - region: "us-west-2" + region: us-west-2 request_timeout_seconds: 30 resource_arn: "" retry_on_failure: @@ -43,8 +48,7 @@ exporters: role_arn: log_role_arn_value_test sending_queue: queue_size: 1000 - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] awsxray: aws_log_groups: [] certificate_file_path: "" @@ -54,19 +58,35 @@ exporters: indexed_attributes: [] local_mode: true max_retries: 2 + middleware: agenthealth/traces no_verify_ssl: true num_workers: 1 + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - profile: "" - shared_credentials_file: [ ] + shared_credentials_file: [] telemetry: enabled: true include_metadata: true -extensions: {} +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + agenthealth/metrics: + 
is_usage_data_enabled: true + stats: + operations: + - PutMetricData + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -210,7 +230,10 @@ receivers: storage: null type: udp_input service: - extensions: [] + extensions: + - agenthealth/metrics + - agenthealth/logs + - agenthealth/traces pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index f5863e139b..d353a9cbf7 100644 --- a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -1,106 +1,113 @@ connectors: {} exporters: - awscloudwatchlogs/emf_logs: - certificate_file_path: "" - emf_only: true - endpoint: "" - imds_retries: 1 - local_mode: false - log_group_name: emf/logs/default - log_retention: 0 - log_stream_name: ${ENV_LOG_STREAM_NAME} - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - profile: "" - proxy_address: "" - raw_log: true - region: ${ENV_REGION} - request_timeout_seconds: 30 - resource_arn: "" - retry_on_failure: - enabled: true - initial_interval: 5s - max_elapsed_time: 5m0s - max_interval: 30s - multiplier: 1.5 - randomization_factor: 0.5 - role_arn: ${ENV_CREDENTIALS_ROLE_ARN} - sending_queue: - queue_size: 1000 - shared_credentials_file: [] -extensions: {} + awscloudwatchlogs/emf_logs: + certificate_file_path: "" + emf_only: true + endpoint: "" + imds_retries: 1 + local_mode: false + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: ${ENV_LOG_STREAM_NAME} + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + raw_log: true + region: ${ENV_REGION} + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 
+ randomization_factor: 0.5 + role_arn: ${ENV_CREDENTIALS_ROLE_ARN} + sending_queue: + queue_size: 1000 + shared_credentials_file: [] +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - batch/emf_logs: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/emf_logs: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s receivers: - tcplog/emf_logs: - attributes: {} - encoding: utf-8 - id: tcp_input - listen_address: 0.0.0.0:25888 - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: tcp_input - udplog/emf_logs: - attributes: {} - encoding: utf-8 - id: udp_input - listen_address: 0.0.0.0:25888 - multiline: - line_end_pattern: .^ - line_start_pattern: "" - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: udp_input + tcplog/emf_logs: + attributes: {} + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: tcp_input + udplog/emf_logs: + attributes: {} + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: udp_input service: - extensions: [] - pipelines: - logs/emf_logs: - exporters: - - awscloudwatchlogs/emf_logs - processors: - - batch/emf_logs - receivers: - - tcplog/emf_logs - - udplog/emf_logs - telemetry: - logs: - 
development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: - - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 1025e3674a..f34cd23047 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -1,67 +1,76 @@ connectors: {} exporters: awscloudwatch: - force_flush_interval: 1m0s - max_datums_per_call: 1000 - max_values_per_datum: 150 - namespace: CWAgent - region: us-east-1 - resource_to_telemetry_conversion: - enabled: true -extensions: {} + force_flush_interval: 1m0s + max_datums_per_call: 1000 + max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 + namespace: CWAgent + region: us-east-1 + region_type: ACJ + resource_to_telemetry_conversion: + enabled: true +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: - cumulativetodelta/hostDeltaMetrics: - exclude: - match_type: strict - metrics: 
- - iops_in_progress - - diskio_iops_in_progress - regexp: null - include: - match_type: "" - metrics: [] - regexp: null - initial_value: 0 - max_staleness: 0s - ec2tagger: - ec2_instance_tag_keys: - - AutoScalingGroupName - ec2_metadata_tags: - - ImageId - - InstanceId - - InstanceType - refresh_interval_seconds: 0s - imds_retries: 1 - transform: - log_statements: [ ] - error_mode: "propagate" - metric_statements: - - context: metric - statements: - - set(unit, "Count") where name == "diskio_iops_in_progress" - - set(name, "DRIVER_DISKIO_IOPS_IN_PROGRESS") where name == "diskio_iops_in_progress" - - set(unit, "Milliseconds") where name == "diskio_read_time" - - set(name, "DRIVER_DISKIO_READ_TIME") where name == "diskio_read_time" - - set(unit, "Milliseconds") where name == "diskio_write_time" - - set(name, "DRIVER_DISKIO_WRITE_TIME") where name == "diskio_write_time" - trace_statements: [] + cumulativetodelta/hostDeltaMetrics: + exclude: + match_type: strict + metrics: + - iops_in_progress + - diskio_iops_in_progress + regexp: null + include: + match_type: "" + metrics: [] + regexp: null + initial_value: 0 + max_staleness: 0s + ec2tagger: + ec2_instance_tag_keys: + - AutoScalingGroupName + ec2_metadata_tags: + - InstanceId + - InstanceType + - ImageId + imds_retries: 1 + refresh_interval_seconds: 0s + transform: + error_mode: propagate + log_statements: [] + metric_statements: + - context: metric + statements: + - set(unit, "Count") where name == "diskio_iops_in_progress" + - set(name, "DRIVER_DISKIO_IOPS_IN_PROGRESS") where name == "diskio_iops_in_progress" + - set(unit, "Milliseconds") where name == "diskio_read_time" + - set(name, "DRIVER_DISKIO_READ_TIME") where name == "diskio_read_time" + - set(unit, "Milliseconds") where name == "diskio_write_time" + - set(name, "DRIVER_DISKIO_WRITE_TIME") where name == "diskio_write_time" + trace_statements: [] receivers: telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s 
service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: - metrics/hostDeltaMetrics: - exporters: - - awscloudwatch - processors: - - cumulativetodelta/hostDeltaMetrics - - ec2tagger - - transform - receivers: - - telegraf_diskio + metrics/hostDeltaMetrics: + exporters: + - awscloudwatch + processors: + - cumulativetodelta/hostDeltaMetrics + - ec2tagger + - transform + receivers: + - telegraf_diskio telemetry: logs: development: false @@ -71,7 +80,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index f609509f15..8728209399 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -1,51 +1,60 @@ connectors: {} exporters: awscloudwatch: - force_flush_interval: 1m0s - max_datums_per_call: 1000 - max_values_per_datum: 150 - namespace: CWAgent - region: us-east-1 - resource_to_telemetry_conversion: - enabled: true -extensions: {} + force_flush_interval: 1m0s + max_datums_per_call: 1000 + max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 + namespace: CWAgent + region: us-east-1 + region_type: ACJ + resource_to_telemetry_conversion: + enabled: true +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: - cumulativetodelta/hostDeltaMetrics: - exclude: - match_type: "" - metrics: [ ] - regexp: null - include: - match_type: "" - metrics: [] - regexp: null - initial_value: 0 - max_staleness: 0s - ec2tagger: - ec2_instance_tag_keys: - - AutoScalingGroupName - ec2_metadata_tags: - - ImageId - - InstanceId - - InstanceType - 
refresh_interval_seconds: 0s - imds_retries: 1 + cumulativetodelta/hostDeltaMetrics: + exclude: + match_type: "" + metrics: [] + regexp: null + include: + match_type: "" + metrics: [] + regexp: null + initial_value: 0 + max_staleness: 0s + ec2tagger: + ec2_instance_tag_keys: + - AutoScalingGroupName + ec2_metadata_tags: + - ImageId + - InstanceId + - InstanceType + imds_retries: 1 + refresh_interval_seconds: 0s receivers: telegraf_net: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: - metrics/hostDeltaMetrics: - exporters: - - awscloudwatch - processors: - - cumulativetodelta/hostDeltaMetrics - - ec2tagger - receivers: - - telegraf_net + metrics/hostDeltaMetrics: + exporters: + - awscloudwatch + processors: + - cumulativetodelta/hostDeltaMetrics + - ec2tagger + receivers: + - telegraf_net telemetry: logs: development: false @@ -55,7 +64,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 1049f465b4..578ae33611 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -4,51 +4,60 @@ exporters: drop_original_metrics: CPU_USAGE_IDLE: true cpu_time_active: true - nvidia_smi_utilization_gpu: true nvidia_smi_temperature_gpu: true + nvidia_smi_utilization_gpu: true force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + 
agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s transform: + error_mode: propagate log_statements: [] - error_mode: "propagate" metric_statements: - context: metric statements: - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" trace_statements: [] receivers: telegraf_cpu: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_nvidia_smi: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -69,7 +78,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index a1746ec979..4ff7d69f95 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -1,430 +1,539 @@ connectors: {} exporters: - awscloudwatchlogs/emf_logs: - emf_only: true - certificate_file_path: "" - endpoint: "https://fake_endpoint" - imds_retries: 2 - 
local_mode: false - log_group_name: emf/logs/default - log_retention: 0 - log_stream_name: host_name_from_env - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - profile: "default" - proxy_address: "" - raw_log: true - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - retry_on_failure: - enabled: true - initial_interval: 5s - max_elapsed_time: 5m0s - max_interval: 30s - multiplier: 1.5 - randomization_factor: 0.5 - role_arn: "" - sending_queue: - queue_size: 1000 - shared_credentials_file: [ "/root/.aws/credentials" ] - awsemf/containerinsights: - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: true - eks_fargate_container_insights_enabled: false - certificate_file_path: "" - endpoint: "https://fake_endpoint" - enhanced_container_insights: true - imds_retries: 2 - local_mode: false - log_group_name: /aws/containerinsights/{ClusterName}/performance - log_retention: 0 - log_stream_name: '{NodeName}' - max_retries: 2 - metric_declarations: - # container metrics - - dimensions: [ [ ClusterName ], [ ClusterName, ContainerName, FullPodName, Namespace, PodName ], [ ClusterName, ContainerName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - container_cpu_utilization - - container_cpu_utilization_over_container_limit - - container_cpu_limit - - container_cpu_request - - container_memory_utilization - - container_memory_utilization_over_container_limit - - container_memory_failures_total - - container_memory_limit - - container_memory_request - - container_filesystem_usage - - container_filesystem_available - - container_filesystem_utilization - # pod metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_cpu_utilization - - pod_memory_utilization - - pod_network_rx_bytes - - 
pod_network_tx_bytes - - pod_cpu_utilization_over_pod_limit - - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_interface_network_rx_dropped - - pod_interface_network_tx_dropped - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, Service ] ] - label_matchers: [] - metric_name_selectors: - - pod_cpu_reserved_capacity - - pod_memory_reserved_capacity - - pod_number_of_container_restarts - - pod_number_of_containers - - pod_number_of_running_containers - - pod_status_ready - - pod_status_scheduled - - pod_status_running - - pod_status_pending - - pod_status_failed - - pod_status_unknown - - pod_status_succeeded - - pod_memory_request - - pod_memory_limit - - pod_cpu_limit - - pod_cpu_request - - pod_container_status_running - - pod_container_status_terminated - - pod_container_status_waiting - - pod_container_status_waiting_reason_crash_loop_back_off - - pod_container_status_waiting_reason_image_pull_error - - pod_container_status_waiting_reason_start_error - - pod_container_status_waiting_reason_create_container_error - - pod_container_status_waiting_reason_create_container_config_error - - pod_container_status_terminated_reason_oom_killed - # node metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_cpu_utilization - - node_memory_utilization - - node_network_total_bytes - - node_cpu_reserved_capacity - - node_memory_reserved_capacity - - node_number_of_running_pods - - node_number_of_running_containers - - node_cpu_usage_total - - node_cpu_limit - - node_memory_working_set - - node_memory_limit - - node_status_condition_ready - - node_status_condition_disk_pressure - - 
node_status_condition_memory_pressure - - node_status_condition_pid_pressure - - node_status_condition_network_unavailable - - node_status_condition_unknown - - node_status_capacity_pods - - node_status_allocatable_pods - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_interface_network_rx_dropped - - node_interface_network_tx_dropped - - node_diskio_io_service_bytes_total - - node_diskio_io_serviced_total - # node fs metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_filesystem_utilization - - node_filesystem_inodes - - node_filesystem_inodes_free - # service metrics - - dimensions: [ [ ClusterName, Namespace, Service ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - service_number_of_running_pods - # deployment/stateful set/replica set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - replicas_desired - - replicas_ready - - status_replicas_available - - status_replicas_unavailable - # daemon set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - daemonset_status_number_available - - daemonset_status_number_unavailable - # namespace metrics - - dimensions: [ [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - namespace_number_of_running_pods - # cluster metrics - - dimensions: [ [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - cluster_node_count - - cluster_failed_node_count - - cluster_number_of_running_pods - # control plane metrics - - dimensions: [ [ClusterName, endpoint], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_size_bytes - - apiserver_storage_db_total_size_in_bytes - - etcd_db_total_size_in_bytes - - 
dimensions: [ [ClusterName, resource], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_list_duration_seconds - - apiserver_longrunning_requests - - apiserver_storage_objects - - dimensions: [ [ClusterName, verb], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_request_duration_seconds - - rest_client_request_duration_seconds - - dimensions: [ [ClusterName, code, verb], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_request_total - - apiserver_request_total_5xx - - dimensions: [ [ClusterName, operation], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_controller_admission_duration_seconds - - apiserver_admission_step_admission_duration_seconds - - etcd_request_duration_seconds - - dimensions: [ [ClusterName, code, method], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - rest_client_requests_total - - dimensions: [ [ClusterName, request_kind], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_current_inflight_requests - - apiserver_current_inqueue_requests - - dimensions: [ [ClusterName, name], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_webhook_admission_duration_seconds - - dimensions: [ [ClusterName, group], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_requested_deprecated_apis - - dimensions: [ [ClusterName, reason], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_rejected_requests_total - - dimensions: [ [ ClusterName, priority_level ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_request_concurrency_limit - metric_descriptors: - - metric_name: apiserver_admission_controller_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_admission_step_admission_duration_seconds - 
unit: Seconds - overwrite: true - - metric_name: apiserver_admission_webhook_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_current_inflight_requests - unit: Count - overwrite: true - - metric_name: apiserver_current_inqueue_requests - unit: Count - overwrite: true - - metric_name: apiserver_flowcontrol_rejected_requests_total - unit: Count - overwrite: true - - metric_name: apiserver_flowcontrol_request_concurrency_limit - unit: Count - overwrite: true - - metric_name: apiserver_longrunning_requests - unit: Count - overwrite: true - - metric_name: apiserver_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_request_total - unit: Count - overwrite: true - - metric_name: apiserver_request_total_5xx - unit: Count - overwrite: true - - metric_name: apiserver_requested_deprecated_apis - unit: Count - overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_list_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: apiserver_storage_size_bytes - unit: Bytes - overwrite: true - - metric_name: etcd_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: rest_client_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: rest_client_requests_total - unit: Count - overwrite: true - namespace: ContainerInsights - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: - - Sources - - kubernetes - proxy_address: "" - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - retain_initial_value_of_delta_metric: false - role_arn: "" - profile: "default" - shared_credentials_file: [ "/root/.aws/credentials" ] 
- version: "0" -extensions: {} + awscloudwatchlogs/emf_logs: + certificate_file_path: "" + emf_only: true + endpoint: https://fake_endpoint + imds_retries: 2 + local_mode: false + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: host_name_from_env + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: default + proxy_address: "" + raw_log: true + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + role_arn: "" + sending_queue: + queue_size: 1000 + shared_credentials_file: + - /root/.aws/credentials + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: true + imds_retries: 2 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - container_cpu_utilization + - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request + - container_memory_utilization + - container_memory_utilization_over_container_limit + - container_memory_failures_total + - container_memory_limit + - container_memory_request + - container_filesystem_usage + - container_filesystem_available + - container_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace 
+ - - ClusterName + - FullPodName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - PodName + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - pod_interface_network_rx_dropped + - pod_interface_network_tx_dropped + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - Service + label_matchers: [] + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - pod_number_of_container_restarts + - pod_number_of_containers + - pod_number_of_running_containers + - pod_status_ready + - pod_status_scheduled + - pod_status_running + - pod_status_pending + - pod_status_failed + - pod_status_unknown + - pod_status_succeeded + - pod_memory_request + - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - 
node_number_of_running_containers + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - node_status_condition_ready + - node_status_condition_disk_pressure + - node_status_condition_memory_pressure + - node_status_condition_pid_pressure + - node_status_condition_network_unavailable + - node_status_condition_unknown + - node_status_capacity_pods + - node_status_allocatable_pods + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_interface_network_rx_dropped + - node_interface_network_tx_dropped + - node_diskio_io_service_bytes_total + - node_diskio_io_serviced_total + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_filesystem_utilization + - node_filesystem_inodes + - node_filesystem_inodes_free + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + label_matchers: [] + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - replicas_desired + - replicas_ready + - status_replicas_available + - status_replicas_unavailable + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - daemonset_status_number_available + - daemonset_status_number_unavailable + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + - cluster_number_of_running_pods + - dimensions: + - - ClusterName + - endpoint + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_size_bytes + - 
apiserver_storage_db_total_size_in_bytes + - etcd_db_total_size_in_bytes + - dimensions: + - - ClusterName + - resource + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_list_duration_seconds + - apiserver_longrunning_requests + - apiserver_storage_objects + - dimensions: + - - ClusterName + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_duration_seconds + - rest_client_request_duration_seconds + - dimensions: + - - ClusterName + - code + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_total + - apiserver_request_total_5xx + - dimensions: + - - ClusterName + - operation + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_controller_admission_duration_seconds + - apiserver_admission_step_admission_duration_seconds + - etcd_request_duration_seconds + - dimensions: + - - ClusterName + - code + - method + - - ClusterName + label_matchers: [] + metric_name_selectors: + - rest_client_requests_total + - dimensions: + - - ClusterName + - request_kind + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests + - dimensions: + - - ClusterName + - name + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_webhook_admission_duration_seconds + - dimensions: + - - ClusterName + - group + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_requested_deprecated_apis + - dimensions: + - - ClusterName + - reason + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_rejected_requests_total + - dimensions: + - - ClusterName + - priority_level + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_request_concurrency_limit + metric_descriptors: + - metric_name: 
apiserver_admission_controller_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_step_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_webhook_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_current_inflight_requests + overwrite: true + unit: Count + - metric_name: apiserver_current_inqueue_requests + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_rejected_requests_total + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_request_concurrency_limit + overwrite: true + unit: Count + - metric_name: apiserver_longrunning_requests + overwrite: true + unit: Count + - metric_name: apiserver_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_request_total + overwrite: true + unit: Count + - metric_name: apiserver_request_total_5xx + overwrite: true + unit: Count + - metric_name: apiserver_requested_deprecated_apis + overwrite: true + unit: Count + - metric_name: apiserver_storage_objects + overwrite: true + unit: Count + - metric_name: etcd_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_list_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: apiserver_storage_size_bytes + overwrite: true + unit: Bytes + - metric_name: etcd_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: rest_client_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: rest_client_requests_total + overwrite: true + unit: Count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: default + proxy_address: "" + region: us-east-1 + 
request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - metricstransform/containerinsights: - transforms: - - include: apiserver_request_total - match_type: regexp - experimental_match_labels: { "code": "^5.*" } - action: insert - new_name: apiserver_request_total_5xx - aggregation_type: "" - group_resource_labels: { } - operations: [ ] - submatch_case: "" - batch/containerinsights: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s - batch/emf_logs: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + batch/emf_logs: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + metricstransform/containerinsights: + transforms: + - action: insert + aggregation_type: "" + experimental_match_labels: + code: ^5.* + group_resource_labels: {} + include: apiserver_request_total + match_type: regexp + new_name: apiserver_request_total_5xx + operations: [] + submatch_case: "" receivers: - awscontainerinsightreceiver: - add_container_name_metric_label: true - add_full_pod_name_metric_label: true - add_service_as_attribute: true - cluster_name: TestCluster - collection_interval: 30s - container_orchestrator: eks - enable_control_plane_metrics: true - certificate_file_path: "" - endpoint: "" - imds_retries: 2 - leader_lock_name: cwagent-clusterleader - leader_lock_using_config_map_only: true - local_mode: false - max_retries: 0 - no_verify_ssl: false - 
num_workers: 0 - prefer_full_pod_name: true - proxy_address: "" - region: "us-east-1" - request_timeout_seconds: 0 - resource_arn: "" - role_arn: "" - profile: "default" - shared_credentials_file: [ "/root/.aws/credentials" ] - tcplog/emf_logs: - attributes: {} - encoding: utf-8 - id: tcp_input - listen_address: 0.0.0.0:25888 - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: tcp_input - udplog/emf_logs: - attributes: {} - encoding: utf-8 - id: udp_input - listen_address: 0.0.0.0:25888 - multiline: - line_end_pattern: .^ - line_start_pattern: "" - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: udp_input + awscontainerinsightreceiver: + add_container_name_metric_label: true + add_full_pod_name_metric_label: true + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: true + endpoint: "" + imds_retries: 2 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: true + profile: default + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + tcplog/emf_logs: + attributes: {} + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: tcp_input + udplog/emf_logs: + attributes: {} + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + 
operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: udp_input service: - extensions: [] - pipelines: - logs/emf_logs: - exporters: - - awscloudwatchlogs/emf_logs - processors: - - batch/emf_logs - receivers: - - tcplog/emf_logs - - udplog/emf_logs - metrics/containerinsights: - exporters: - - awsemf/containerinsights - processors: - - metricstransform/containerinsights - - batch/containerinsights - receivers: - - awscontainerinsightreceiver - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - metricstransform/containerinsights + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index 952734d325..04eceffdae 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -1,58 +1,68 @@ 
connectors: {} exporters: - awscloudwatch: - force_flush_interval: 1m0s - max_datums_per_call: 1000 - max_values_per_datum: 150 - namespace: CWAgent - region: us-east-1 - resource_to_telemetry_conversion: - enabled: true -extensions: {} + awscloudwatch: + force_flush_interval: 1m0s + max_datums_per_call: 1000 + max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 + namespace: CWAgent + region: us-east-1 + region_type: ACJ + resource_to_telemetry_conversion: + enabled: true +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: - ec2tagger: - ec2_instance_tag_keys: [] - ec2_metadata_tags: [] - refresh_interval_seconds: 0s - imds_retries: 1 + ec2tagger: + ec2_instance_tag_keys: [] + ec2_metadata_tags: [] + imds_retries: 1 + refresh_interval_seconds: 0s receivers: - telegraf_disk: - collection_interval: 1m0s - initial_delay: "1s" - timeout: 0s - telegraf_mem: - collection_interval: 1m0s - initial_delay: "1s" - timeout: 0s + telegraf_disk: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s + telegraf_mem: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s service: - extensions: [] - pipelines: - metrics/host: - exporters: - - awscloudwatch - processors: - - ec2tagger - receivers: - - telegraf_disk - - telegraf_mem - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/metrics + pipelines: + metrics/host: + exporters: + - awscloudwatch + processors: + - ec2tagger + receivers: + - telegraf_mem + - telegraf_disk + telemetry: + logs: + development: false + disable_caller: false + 
disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index caa37a5522..b8486df00c 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-east-1 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: @@ -17,19 +25,20 @@ processors: - ImageId - InstanceId - InstanceType - refresh_interval_seconds: 0s imds_retries: 1 + refresh_interval_seconds: 0s receivers: telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -48,7 +57,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 64ed8e36cb..2159ecb2aa 100644 
--- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -1,354 +1,461 @@ connectors: {} exporters: - awsemf/containerinsights: - certificate_file_path: "" - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: true - eks_fargate_container_insights_enabled: false - endpoint: "https://fake_endpoint" - enhanced_container_insights: true - imds_retries: 1 - local_mode: false - log_group_name: /aws/containerinsights/{ClusterName}/performance - log_retention: 0 - log_stream_name: '{NodeName}' - max_retries: 2 - metric_declarations: - # container metrics - - dimensions: [ [ ClusterName ], [ ClusterName, ContainerName, FullPodName, Namespace, PodName ], [ ClusterName, ContainerName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - container_cpu_utilization - - container_cpu_utilization_over_container_limit - - container_cpu_limit - - container_cpu_request - - container_memory_utilization - - container_memory_utilization_over_container_limit - - container_memory_failures_total - - container_memory_limit - - container_memory_request - - container_filesystem_usage - - container_filesystem_available - - container_filesystem_utilization - # pod metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_cpu_utilization - - pod_memory_utilization - - pod_network_rx_bytes - - pod_network_tx_bytes - - pod_cpu_utilization_over_pod_limit - - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_interface_network_rx_dropped - - 
pod_interface_network_tx_dropped - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, Service ] ] - label_matchers: [] - metric_name_selectors: - - pod_cpu_reserved_capacity - - pod_memory_reserved_capacity - - pod_number_of_container_restarts - - pod_number_of_containers - - pod_number_of_running_containers - - pod_status_ready - - pod_status_scheduled - - pod_status_running - - pod_status_pending - - pod_status_failed - - pod_status_unknown - - pod_status_succeeded - - pod_memory_request - - pod_memory_limit - - pod_cpu_limit - - pod_cpu_request - - pod_container_status_running - - pod_container_status_terminated - - pod_container_status_waiting - - pod_container_status_waiting_reason_crash_loop_back_off - - pod_container_status_waiting_reason_image_pull_error - - pod_container_status_waiting_reason_start_error - - pod_container_status_waiting_reason_create_container_error - - pod_container_status_waiting_reason_create_container_config_error - - pod_container_status_terminated_reason_oom_killed - # node metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_cpu_utilization - - node_memory_utilization - - node_network_total_bytes - - node_cpu_reserved_capacity - - node_memory_reserved_capacity - - node_number_of_running_pods - - node_number_of_running_containers - - node_cpu_usage_total - - node_cpu_limit - - node_memory_working_set - - node_memory_limit - - node_status_condition_ready - - node_status_condition_disk_pressure - - node_status_condition_memory_pressure - - node_status_condition_pid_pressure - - node_status_condition_network_unavailable - - node_status_condition_unknown - - node_status_capacity_pods - - node_status_allocatable_pods - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - 
node_interface_network_rx_dropped - - node_interface_network_tx_dropped - - node_diskio_io_service_bytes_total - - node_diskio_io_serviced_total - # node fs metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_filesystem_utilization - - node_filesystem_inodes - - node_filesystem_inodes_free - # service metrics - - dimensions: [ [ ClusterName, Namespace, Service ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - service_number_of_running_pods - # deployment/stateful set/replica set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - replicas_desired - - replicas_ready - - status_replicas_available - - status_replicas_unavailable - # daemon set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - daemonset_status_number_available - - daemonset_status_number_unavailable - # namespace metrics - - dimensions: [ [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - namespace_number_of_running_pods - # cluster metrics - - dimensions: [ [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - cluster_node_count - - cluster_failed_node_count - - cluster_number_of_running_pods - # control plane metrics - - dimensions: [ [ ClusterName, endpoint ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_size_bytes - - apiserver_storage_db_total_size_in_bytes - - etcd_db_total_size_in_bytes - - dimensions: [ [ ClusterName, resource ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_list_duration_seconds - - apiserver_longrunning_requests - - apiserver_storage_objects - - dimensions: [ [ ClusterName, verb ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - 
apiserver_request_duration_seconds - - rest_client_request_duration_seconds - - dimensions: [ [ ClusterName, code, verb ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_request_total - - apiserver_request_total_5xx - - dimensions: [ [ ClusterName, operation ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_controller_admission_duration_seconds - - apiserver_admission_step_admission_duration_seconds - - etcd_request_duration_seconds - - dimensions: [ [ ClusterName, code, method ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - rest_client_requests_total - - dimensions: [ [ ClusterName, request_kind ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_current_inflight_requests - - apiserver_current_inqueue_requests - - dimensions: [ [ ClusterName, name ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_webhook_admission_duration_seconds - - dimensions: [ [ ClusterName, group ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_requested_deprecated_apis - - dimensions: [ [ ClusterName, reason ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_rejected_requests_total - - dimensions: [ [ ClusterName, priority_level ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_request_concurrency_limit - metric_descriptors: - - metric_name: apiserver_admission_controller_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_admission_step_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_admission_webhook_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_current_inflight_requests - unit: Count - overwrite: true - - metric_name: apiserver_current_inqueue_requests - unit: Count - overwrite: true - - 
metric_name: apiserver_flowcontrol_rejected_requests_total - unit: Count - overwrite: true - - metric_name: apiserver_flowcontrol_request_concurrency_limit - unit: Count - overwrite: true - - metric_name: apiserver_longrunning_requests - unit: Count - overwrite: true - - metric_name: apiserver_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_request_total - unit: Count - overwrite: true - - metric_name: apiserver_request_total_5xx - unit: Count - overwrite: true - - metric_name: apiserver_requested_deprecated_apis - unit: Count - overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_list_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: apiserver_storage_size_bytes - unit: Bytes - overwrite: true - - metric_name: etcd_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: rest_client_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: rest_client_requests_total - unit: Count - overwrite: true - namespace: ContainerInsights - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: - - Sources - - kubernetes - proxy_address: "" - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - retain_initial_value_of_delta_metric: false - role_arn: "" - profile: "AmazonCloudWatchAgent" - shared_credentials_file: ["fake-path"] - version: "0" -extensions: {} + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: true + 
imds_retries: 1 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - container_cpu_utilization + - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request + - container_memory_utilization + - container_memory_utilization_over_container_limit + - container_memory_failures_total + - container_memory_limit + - container_memory_request + - container_filesystem_usage + - container_filesystem_available + - container_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - - ClusterName + - FullPodName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - PodName + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - pod_interface_network_rx_dropped + - pod_interface_network_tx_dropped + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - Service + label_matchers: [] + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - pod_number_of_container_restarts + - pod_number_of_containers + - pod_number_of_running_containers + - pod_status_ready + - pod_status_scheduled 
+ - pod_status_running + - pod_status_pending + - pod_status_failed + - pod_status_unknown + - pod_status_succeeded + - pod_memory_request + - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - node_status_condition_ready + - node_status_condition_disk_pressure + - node_status_condition_memory_pressure + - node_status_condition_pid_pressure + - node_status_condition_network_unavailable + - node_status_condition_unknown + - node_status_capacity_pods + - node_status_allocatable_pods + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_interface_network_rx_dropped + - node_interface_network_tx_dropped + - node_diskio_io_service_bytes_total + - node_diskio_io_serviced_total + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_filesystem_utilization + - node_filesystem_inodes + - node_filesystem_inodes_free + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + label_matchers: [] + 
metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - replicas_desired + - replicas_ready + - status_replicas_available + - status_replicas_unavailable + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - daemonset_status_number_available + - daemonset_status_number_unavailable + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + - cluster_number_of_running_pods + - dimensions: + - - ClusterName + - endpoint + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes + - etcd_db_total_size_in_bytes + - dimensions: + - - ClusterName + - resource + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_list_duration_seconds + - apiserver_longrunning_requests + - apiserver_storage_objects + - dimensions: + - - ClusterName + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_duration_seconds + - rest_client_request_duration_seconds + - dimensions: + - - ClusterName + - code + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_total + - apiserver_request_total_5xx + - dimensions: + - - ClusterName + - operation + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_controller_admission_duration_seconds + - apiserver_admission_step_admission_duration_seconds + - etcd_request_duration_seconds + - dimensions: + - - ClusterName + - code + - method + - - ClusterName + label_matchers: [] + metric_name_selectors: + - 
rest_client_requests_total + - dimensions: + - - ClusterName + - request_kind + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests + - dimensions: + - - ClusterName + - name + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_webhook_admission_duration_seconds + - dimensions: + - - ClusterName + - group + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_requested_deprecated_apis + - dimensions: + - - ClusterName + - reason + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_rejected_requests_total + - dimensions: + - - ClusterName + - priority_level + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_request_concurrency_limit + metric_descriptors: + - metric_name: apiserver_admission_controller_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_step_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_webhook_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_current_inflight_requests + overwrite: true + unit: Count + - metric_name: apiserver_current_inqueue_requests + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_rejected_requests_total + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_request_concurrency_limit + overwrite: true + unit: Count + - metric_name: apiserver_longrunning_requests + overwrite: true + unit: Count + - metric_name: apiserver_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_request_total + overwrite: true + unit: Count + - metric_name: apiserver_request_total_5xx + overwrite: true + unit: Count + - metric_name: apiserver_requested_deprecated_apis + overwrite: true + unit: Count + - metric_name: 
apiserver_storage_objects + overwrite: true + unit: Count + - metric_name: etcd_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_list_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: apiserver_storage_size_bytes + overwrite: true + unit: Bytes + - metric_name: etcd_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: rest_client_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: rest_client_requests_total + overwrite: true + unit: Count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: AmazonCloudWatchAgent + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: + - fake-path + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - metricstransform/containerinsights: - transforms: - - include: apiserver_request_total - match_type: regexp - experimental_match_labels: { "code": "^5.*" } - action: insert - new_name: apiserver_request_total_5xx - aggregation_type: "" - group_resource_labels: { } - operations: [ ] - submatch_case: "" - batch/containerinsights: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + metricstransform/containerinsights: + transforms: + - action: insert + aggregation_type: "" + experimental_match_labels: + code: ^5.* + group_resource_labels: {} 
+ include: apiserver_request_total + match_type: regexp + new_name: apiserver_request_total_5xx + operations: [] + submatch_case: "" receivers: - awscontainerinsightreceiver: - certificate_file_path: "" - add_container_name_metric_label: true - add_full_pod_name_metric_label: true - add_service_as_attribute: true - cluster_name: TestCluster - collection_interval: 30s - container_orchestrator: eks - enable_control_plane_metrics: true - endpoint: "" - imds_retries: 1 - leader_lock_name: cwagent-clusterleader - leader_lock_using_config_map_only: true - local_mode: true - max_retries: 0 - no_verify_ssl: false - num_workers: 0 - prefer_full_pod_name: true - proxy_address: "" - region: "us-east-1" - request_timeout_seconds: 0 - resource_arn: "" - role_arn: "" - profile: "AmazonCloudWatchAgent" - shared_credentials_file: [ "fake-path" ] + awscontainerinsightreceiver: + add_container_name_metric_label: true + add_full_pod_name_metric_label: true + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: true + endpoint: "" + imds_retries: 1 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: true + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: true + profile: AmazonCloudWatchAgent + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - fake-path service: - extensions: [] - pipelines: - metrics/containerinsights: - exporters: - - awsemf/containerinsights - processors: - - metricstransform/containerinsights - - batch/containerinsights - receivers: - - awscontainerinsightreceiver - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - 
thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - metricstransform/containerinsights + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 7d944ba2b3..194e7c1886 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -1,204 +1,212 @@ connectors: {} exporters: - awscloudwatchlogs/emf_logs: - emf_only: true - certificate_file_path: "" - endpoint: "https://fake_endpoint" - imds_retries: 1 - local_mode: false - log_group_name: emf/logs/default - log_retention: 0 - log_stream_name: fake-host-name - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - proxy_address: "" - raw_log: true - region: us-west-2 - request_timeout_seconds: 30 - resource_arn: "" - retry_on_failure: - enabled: true - initial_interval: 5s - max_elapsed_time: 5m0s - max_interval: 30s - multiplier: 1.5 - randomization_factor: 0.5 - role_arn: "" - sending_queue: - queue_size: 1000 - profile: "" - shared_credentials_file: [ ] - awsemf/containerinsights: - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: false - eks_fargate_container_insights_enabled: false - certificate_file_path: "" - endpoint: "https://fake_endpoint" - "imds_retries": 1 - 
enhanced_container_insights: false - local_mode: false - log_group_name: /aws/ecs/containerinsights/{ClusterName}/performance - log_retention: 0 - log_stream_name: NodeTelemetry-{ContainerInstanceId} - max_retries: 2 - metric_declarations: - - dimensions: - - - ClusterName - - ContainerInstanceId - - InstanceId - label_matchers: [] - metric_name_selectors: - - instance_cpu_reserved_capacity - - instance_cpu_utilization - - instance_filesystem_utilization - - instance_memory_reserved_capacity - - instance_memory_utilization - - instance_network_total_bytes - - instance_number_of_running_tasks - - dimensions: - - - ClusterName - label_matchers: [] - metric_name_selectors: - - instance_cpu_limit - - instance_cpu_reserved_capacity - - instance_cpu_usage_total - - instance_cpu_utilization - - instance_filesystem_utilization - - instance_memory_limit - - instance_memory_reserved_capacity - - instance_memory_utilization - - instance_memory_working_set - - instance_network_total_bytes - - instance_number_of_running_tasks - metric_descriptors: [] - namespace: ECS/ContainerInsights - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: - - Sources - proxy_address: "" - region: us-west-2 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - retain_initial_value_of_delta_metric: false - role_arn: "" - profile: "" - shared_credentials_file: [ ] - version: "0" -extensions: {} + awscloudwatchlogs/emf_logs: + certificate_file_path: "" + emf_only: true + endpoint: https://fake_endpoint + imds_retries: 1 + local_mode: false + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: fake-host-name + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + raw_log: true + region: us-west-2 + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + 
max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + role_arn: "" + sending_queue: + queue_size: 1000 + shared_credentials_file: [] + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/ecs/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: NodeTelemetry-{ContainerInstanceId} + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - ContainerInstanceId + - InstanceId + label_matchers: [] + metric_name_selectors: + - instance_cpu_reserved_capacity + - instance_cpu_utilization + - instance_filesystem_utilization + - instance_memory_reserved_capacity + - instance_memory_utilization + - instance_network_total_bytes + - instance_number_of_running_tasks + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - instance_cpu_limit + - instance_cpu_reserved_capacity + - instance_cpu_usage_total + - instance_cpu_utilization + - instance_filesystem_utilization + - instance_memory_limit + - instance_memory_reserved_capacity + - instance_memory_utilization + - instance_memory_working_set + - instance_network_total_bytes + - instance_number_of_running_tasks + metric_descriptors: [] + middleware: agenthealth/logs + namespace: ECS/ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + profile: "" + proxy_address: "" + region: us-west-2 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + 
stats: + operations: + - PutLogEvents processors: - batch/containerinsights: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s - batch/emf_logs: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + batch/emf_logs: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s receivers: - awscontainerinsightreceiver: - add_container_name_metric_label: false - add_full_pod_name_metric_label: false - add_service_as_attribute: true - cluster_name: "" - collection_interval: 30s - container_orchestrator: ecs - enable_control_plane_metrics: false - certificate_file_path: "" - endpoint: "" - imds_retries: 1 - leader_lock_name: otel-container-insight-clusterleader - leader_lock_using_config_map_only: false - local_mode: false - max_retries: 0 - no_verify_ssl: false - num_workers: 0 - prefer_full_pod_name: false - proxy_address: "" - region: "us-west-2" - request_timeout_seconds: 0 - resource_arn: "" - role_arn: "" - profile: "" - shared_credentials_file: [ ] - tcplog/emf_logs: - attributes: {} - encoding: utf-8 - id: tcp_input - listen_address: 0.0.0.0:25888 - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: tcp_input - udplog/emf_logs: - attributes: {} - encoding: utf-8 - id: udp_input - listen_address: 0.0.0.0:25888 - multiline: - line_end_pattern: .^ - line_start_pattern: "" - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: udp_input + awscontainerinsightreceiver: + add_container_name_metric_label: false + 
add_full_pod_name_metric_label: false + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: "" + collection_interval: 30s + container_orchestrator: ecs + enable_control_plane_metrics: false + endpoint: "" + imds_retries: 1 + leader_lock_name: otel-container-insight-clusterleader + leader_lock_using_config_map_only: false + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: false + profile: "" + proxy_address: "" + region: us-west-2 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + tcplog/emf_logs: + attributes: {} + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: tcp_input + udplog/emf_logs: + attributes: {} + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: udp_input service: - extensions: [] - pipelines: - logs/emf_logs: - exporters: - - awscloudwatchlogs/emf_logs - processors: - - batch/emf_logs - receivers: - - tcplog/emf_logs - - udplog/emf_logs - metrics/containerinsights: - exporters: - - awsemf/containerinsights - processors: - - batch/containerinsights - receivers: - - awscontainerinsightreceiver - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - 
awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 3eb5ce2d7e..79ce1dc187 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -1,430 +1,536 @@ connectors: {} exporters: - awscloudwatchlogs/emf_logs: - certificate_file_path: "" - emf_only: true - endpoint: "https://fake_endpoint" - imds_retries: 0 - local_mode: false - log_group_name: emf/logs/default - log_retention: 0 - log_stream_name: host_name_from_env - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - proxy_address: "" - raw_log: true - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - retry_on_failure: - enabled: true - initial_interval: 5s - max_elapsed_time: 5m0s - max_interval: 30s - multiplier: 1.5 - randomization_factor: 0.5 - role_arn: "" - sending_queue: - queue_size: 1000 - profile: "" - shared_credentials_file: [ ] - awsemf/containerinsights: - certificate_file_path: "" - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: false - eks_fargate_container_insights_enabled: false - endpoint: "https://fake_endpoint" - imds_retries: 0 - enhanced_container_insights: true - local_mode: false - log_group_name: 
/aws/containerinsights/{ClusterName}/performance - log_retention: 0 - log_stream_name: '{NodeName}' - max_retries: 2 - metric_declarations: - # container metrics - - dimensions: [ [ ClusterName ], [ ClusterName, ContainerName, FullPodName, Namespace, PodName ], [ ClusterName, ContainerName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - container_cpu_utilization - - container_cpu_utilization_over_container_limit - - container_cpu_limit - - container_cpu_request - - container_memory_utilization - - container_memory_utilization_over_container_limit - - container_memory_failures_total - - container_memory_limit - - container_memory_request - - container_filesystem_usage - - container_filesystem_available - - container_filesystem_utilization - # pod metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, Namespace, Service ], [ ClusterName, Namespace ], [ ClusterName, FullPodName, Namespace, PodName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_cpu_utilization - - pod_memory_utilization - - pod_network_rx_bytes - - pod_network_tx_bytes - - pod_cpu_utilization_over_pod_limit - - pod_memory_utilization_over_pod_limit - - dimensions: [ [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, PodName ], [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - pod_interface_network_rx_dropped - - pod_interface_network_tx_dropped - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ], [ ClusterName, FullPodName, Namespace, PodName ], [ ClusterName, Namespace, Service ] ] - label_matchers: [] - metric_name_selectors: - - pod_cpu_reserved_capacity - - pod_memory_reserved_capacity - - pod_number_of_container_restarts - - pod_number_of_containers - - pod_number_of_running_containers - - pod_status_ready - - pod_status_scheduled - - pod_status_running - - pod_status_pending - - pod_status_failed - - pod_status_unknown - - 
pod_status_succeeded - - pod_memory_request - - pod_memory_limit - - pod_cpu_limit - - pod_cpu_request - - pod_container_status_running - - pod_container_status_terminated - - pod_container_status_waiting - - pod_container_status_waiting_reason_crash_loop_back_off - - pod_container_status_waiting_reason_image_pull_error - - pod_container_status_waiting_reason_start_error - - pod_container_status_waiting_reason_create_container_error - - pod_container_status_waiting_reason_create_container_config_error - - pod_container_status_terminated_reason_oom_killed - # node metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_cpu_utilization - - node_memory_utilization - - node_network_total_bytes - - node_cpu_reserved_capacity - - node_memory_reserved_capacity - - node_number_of_running_pods - - node_number_of_running_containers - - node_cpu_usage_total - - node_cpu_limit - - node_memory_working_set - - node_memory_limit - - node_status_condition_ready - - node_status_condition_disk_pressure - - node_status_condition_memory_pressure - - node_status_condition_pid_pressure - - node_status_condition_network_unavailable - - node_status_condition_unknown - - node_status_capacity_pods - - node_status_allocatable_pods - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_interface_network_rx_dropped - - node_interface_network_tx_dropped - - node_diskio_io_service_bytes_total - - node_diskio_io_serviced_total - # node fs metrics - - dimensions: [ [ ClusterName, InstanceId, NodeName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - node_filesystem_utilization - - node_filesystem_inodes - - node_filesystem_inodes_free - # service metrics - - dimensions: [ [ ClusterName, Namespace, Service ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - service_number_of_running_pods - # 
deployment/stateful set/replica set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - replicas_desired - - replicas_ready - - status_replicas_available - - status_replicas_unavailable - # daemon set metrics - - dimensions: [ [ ClusterName, Namespace, PodName ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - daemonset_status_number_available - - daemonset_status_number_unavailable - # namespace metrics - - dimensions: [ [ ClusterName, Namespace ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - namespace_number_of_running_pods - # cluster metrics - - dimensions: [ [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - cluster_node_count - - cluster_failed_node_count - - cluster_number_of_running_pods - # control plane metrics - - dimensions: [ [ ClusterName, endpoint ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_size_bytes - - apiserver_storage_db_total_size_in_bytes - - etcd_db_total_size_in_bytes - - dimensions: [ [ ClusterName, resource ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_storage_list_duration_seconds - - apiserver_longrunning_requests - - apiserver_storage_objects - - dimensions: [ [ ClusterName, verb ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_request_duration_seconds - - rest_client_request_duration_seconds - - dimensions: [ [ ClusterName, code, verb ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_request_total - - apiserver_request_total_5xx - - dimensions: [ [ ClusterName, operation ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_controller_admission_duration_seconds - - apiserver_admission_step_admission_duration_seconds - - etcd_request_duration_seconds - - dimensions: [ [ ClusterName, code, method ], [ ClusterName ] 
] - label_matchers: [ ] - metric_name_selectors: - - rest_client_requests_total - - dimensions: [ [ ClusterName, request_kind ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_current_inflight_requests - - apiserver_current_inqueue_requests - - dimensions: [ [ ClusterName, name ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_admission_webhook_admission_duration_seconds - - dimensions: [ [ ClusterName, group ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_requested_deprecated_apis - - dimensions: [ [ ClusterName, reason ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_rejected_requests_total - - dimensions: [ [ ClusterName, priority_level ], [ ClusterName ] ] - label_matchers: [ ] - metric_name_selectors: - - apiserver_flowcontrol_request_concurrency_limit - metric_descriptors: - - metric_name: apiserver_admission_controller_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_admission_step_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_admission_webhook_admission_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_current_inflight_requests - unit: Count - overwrite: true - - metric_name: apiserver_current_inqueue_requests - unit: Count - overwrite: true - - metric_name: apiserver_flowcontrol_rejected_requests_total - unit: Count - overwrite: true - - metric_name: apiserver_flowcontrol_request_concurrency_limit - unit: Count - overwrite: true - - metric_name: apiserver_longrunning_requests - unit: Count - overwrite: true - - metric_name: apiserver_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_request_total - unit: Count - overwrite: true - - metric_name: apiserver_request_total_5xx - unit: Count - overwrite: true - - metric_name: apiserver_requested_deprecated_apis - unit: Count - 
overwrite: true - - metric_name: apiserver_storage_objects - unit: Count - overwrite: true - - metric_name: etcd_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_list_duration_seconds - unit: Seconds - overwrite: true - - metric_name: apiserver_storage_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: apiserver_storage_size_bytes - unit: Bytes - overwrite: true - - metric_name: etcd_db_total_size_in_bytes - unit: Bytes - overwrite: true - - metric_name: rest_client_request_duration_seconds - unit: Seconds - overwrite: true - - metric_name: rest_client_requests_total - unit: Count - overwrite: true - namespace: ContainerInsights - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: - - Sources - - kubernetes - proxy_address: "" - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - retain_initial_value_of_delta_metric: false - role_arn: "" - profile: "" - shared_credentials_file: [ ] - version: "0" -extensions: {} + awscloudwatchlogs/emf_logs: + certificate_file_path: "" + emf_only: true + endpoint: https://fake_endpoint + imds_retries: 0 + local_mode: false + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: host_name_from_env + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + raw_log: true + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + role_arn: "" + sending_queue: + queue_size: 1000 + shared_credentials_file: [] + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: 
false + endpoint: https://fake_endpoint + enhanced_container_insights: true + imds_retries: 0 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - container_cpu_utilization + - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request + - container_memory_utilization + - container_memory_utilization_over_container_limit + - container_memory_failures_total + - container_memory_limit + - container_memory_request + - container_filesystem_usage + - container_filesystem_available + - container_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - - ClusterName + - FullPodName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - PodName + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - pod_interface_network_rx_dropped + - pod_interface_network_tx_dropped + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - Service + label_matchers: [] + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - pod_number_of_container_restarts + - pod_number_of_containers + - 
pod_number_of_running_containers + - pod_status_ready + - pod_status_scheduled + - pod_status_running + - pod_status_pending + - pod_status_failed + - pod_status_unknown + - pod_status_succeeded + - pod_memory_request + - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - pod_container_status_terminated_reason_oom_killed + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - node_status_condition_ready + - node_status_condition_disk_pressure + - node_status_condition_memory_pressure + - node_status_condition_pid_pressure + - node_status_condition_network_unavailable + - node_status_condition_unknown + - node_status_capacity_pods + - node_status_allocatable_pods + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_interface_network_rx_dropped + - node_interface_network_tx_dropped + - node_diskio_io_service_bytes_total + - node_diskio_io_serviced_total + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_filesystem_utilization + - node_filesystem_inodes + - node_filesystem_inodes_free + - dimensions: + - - ClusterName + - 
Namespace + - Service + - - ClusterName + label_matchers: [] + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - replicas_desired + - replicas_ready + - status_replicas_available + - status_replicas_unavailable + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - daemonset_status_number_available + - daemonset_status_number_unavailable + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + - cluster_number_of_running_pods + - dimensions: + - - ClusterName + - endpoint + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes + - etcd_db_total_size_in_bytes + - dimensions: + - - ClusterName + - resource + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_storage_list_duration_seconds + - apiserver_longrunning_requests + - apiserver_storage_objects + - dimensions: + - - ClusterName + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_duration_seconds + - rest_client_request_duration_seconds + - dimensions: + - - ClusterName + - code + - verb + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_request_total + - apiserver_request_total_5xx + - dimensions: + - - ClusterName + - operation + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_controller_admission_duration_seconds + - apiserver_admission_step_admission_duration_seconds + - etcd_request_duration_seconds + - dimensions: + - - ClusterName + - code + - method + - - 
ClusterName + label_matchers: [] + metric_name_selectors: + - rest_client_requests_total + - dimensions: + - - ClusterName + - request_kind + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests + - dimensions: + - - ClusterName + - name + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_admission_webhook_admission_duration_seconds + - dimensions: + - - ClusterName + - group + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_requested_deprecated_apis + - dimensions: + - - ClusterName + - reason + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_rejected_requests_total + - dimensions: + - - ClusterName + - priority_level + - - ClusterName + label_matchers: [] + metric_name_selectors: + - apiserver_flowcontrol_request_concurrency_limit + metric_descriptors: + - metric_name: apiserver_admission_controller_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_step_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_webhook_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_current_inflight_requests + overwrite: true + unit: Count + - metric_name: apiserver_current_inqueue_requests + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_rejected_requests_total + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_request_concurrency_limit + overwrite: true + unit: Count + - metric_name: apiserver_longrunning_requests + overwrite: true + unit: Count + - metric_name: apiserver_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_request_total + overwrite: true + unit: Count + - metric_name: apiserver_request_total_5xx + overwrite: true + unit: Count + - metric_name: apiserver_requested_deprecated_apis + 
overwrite: true + unit: Count + - metric_name: apiserver_storage_objects + overwrite: true + unit: Count + - metric_name: etcd_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_list_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: apiserver_storage_size_bytes + overwrite: true + unit: Bytes + - metric_name: etcd_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: rest_client_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: rest_client_requests_total + overwrite: true + unit: Count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - metricstransform/containerinsights: - transforms: - - include: apiserver_request_total - match_type: regexp - experimental_match_labels: { "code": "^5.*" } - action: insert - new_name: apiserver_request_total_5xx - aggregation_type: "" - group_resource_labels: { } - operations: [ ] - submatch_case: "" - batch/containerinsights: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s - batch/emf_logs: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + 
batch/emf_logs: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + metricstransform/containerinsights: + transforms: + - action: insert + aggregation_type: "" + experimental_match_labels: + code: ^5.* + group_resource_labels: {} + include: apiserver_request_total + match_type: regexp + new_name: apiserver_request_total_5xx + operations: [] + submatch_case: "" receivers: - awscontainerinsightreceiver: - certificate_file_path: "" - add_container_name_metric_label: true - add_full_pod_name_metric_label: true - add_service_as_attribute: true - cluster_name: TestCluster - collection_interval: 30s - container_orchestrator: eks - enable_control_plane_metrics: true - endpoint: "" - imds_retries: 0 - leader_lock_name: cwagent-clusterleader - leader_lock_using_config_map_only: true - local_mode: false - max_retries: 0 - no_verify_ssl: false - num_workers: 0 - prefer_full_pod_name: true - proxy_address: "" - region: "us-east-1" - request_timeout_seconds: 0 - resource_arn: "" - role_arn: "" - profile: "" - shared_credentials_file: [ ] - tcplog/emf_logs: - attributes: {} - encoding: utf-8 - id: tcp_input - listen_address: 0.0.0.0:25888 - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: tcp_input - udplog/emf_logs: - attributes: {} - encoding: utf-8 - id: udp_input - listen_address: 0.0.0.0:25888 - multiline: - line_end_pattern: .^ - line_start_pattern: "" - operators: [] - output: [] - resource: {} - retry_on_failure: - enabled: false - initial_interval: 0s - max_elapsed_time: 0s - max_interval: 0s - storage: null - type: udp_input + awscontainerinsightreceiver: + add_container_name_metric_label: true + add_full_pod_name_metric_label: true + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + 
enable_control_plane_metrics: true + endpoint: "" + imds_retries: 0 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: true + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + tcplog/emf_logs: + attributes: {} + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: tcp_input + udplog/emf_logs: + attributes: {} + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + operators: [] + output: [] + resource: {} + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + storage: null + type: udp_input service: - extensions: [] - pipelines: - logs/emf_logs: - exporters: - - awscloudwatchlogs/emf_logs - processors: - - batch/emf_logs - receivers: - - tcplog/emf_logs - - udplog/emf_logs - metrics/containerinsights: - exporters: - - awsemf/containerinsights - processors: - - metricstransform/containerinsights - - batch/containerinsights - receivers: - - awscontainerinsightreceiver - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + metrics/containerinsights: + exporters: 
+ - awsemf/containerinsights + processors: + - metricstransform/containerinsights + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index d8464cc07f..3041976fac 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -1,118 +1,123 @@ connectors: {} exporters: - awsemf/prometheus: - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: false - version: "0" - retain_initial_value_of_delta_metric: false - eks_fargate_container_insights_enabled: false - certificate_file_path: "" - endpoint: "https://fake_endpoint" - "imds_retries": 1 - enhanced_container_insights: false - local_mode: false - log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus - log_retention: 0 - log_stream_name: '{JobName}' - max_retries: 2 - metric_descriptors: - - metric_name: "nginx_request_count" - overwrite: false - unit: "Count" - - metric_declarations: - - dimensions: - - - Service - label_matchers: - - label_names: - - Service - regex: nginx.* - separator: ; - metric_name_selectors: - - ^nginx_request_count$ - - dimensions: [] - label_matchers: - - label_names: - - Namespace - regex: default - separator: ; - metric_name_selectors: - - .* - - dimensions: [ ["name"] ] - label_matchers: - - label_names: - - name - regex: .* - separator: ; - metric_name_selectors: - - ^.*$ - - dimensions: [ ["name"] ] - label_matchers: - - label_names: - - 
name - regex: .* - separator: ; - metric_name_selectors: - - ^node_cpu_guest_seconds_total$ - namespace: CustomizedNamespace - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: [] - proxy_address: "" - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - role_arn: "" - profile: "" - shared_credentials_file: [ ] - -extensions: {} - + awsemf/prometheus: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus + log_retention: 0 + log_stream_name: '{JobName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - Service + label_matchers: + - label_names: + - Service + regex: nginx.* + separator: ; + metric_name_selectors: + - ^nginx_request_count$ + - dimensions: [] + label_matchers: + - label_names: + - Namespace + regex: default + separator: ; + metric_name_selectors: + - .* + - dimensions: + - - name + label_matchers: + - label_names: + - name + regex: .* + separator: ; + metric_name_selectors: + - ^.*$ + - dimensions: + - - name + label_matchers: + - label_names: + - name + regex: .* + separator: ; + metric_name_selectors: + - ^node_cpu_guest_seconds_total$ + metric_descriptors: + - metric_name: nginx_request_count + overwrite: false + unit: Count + middleware: agenthealth/logs + namespace: CustomizedNamespace + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: [] + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + 
shared_credentials_file: [] + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - batch/prometheus: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 30s - + batch/prometheus: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 30s receivers: - telegraf_prometheus: - collection_interval: 1m0s - initial_delay: "1s" - timeout: 0s + telegraf_prometheus: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s service: - extensions: [] - pipelines: - metrics/prometheus: - exporters: - - awsemf/prometheus - processors: - - batch/prometheus - receivers: - - telegraf_prometheus - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: [] - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/logs + pipelines: + metrics/prometheus: + exporters: + - awsemf/prometheus + processors: + - batch/prometheus + receivers: + - telegraf_prometheus + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index 639e1cd466..c6b636e23d 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -1,86 
+1,88 @@ connectors: {} exporters: - awsemf/prometheus: - detailed_metrics: false - dimension_rollup_option: NoDimensionRollup - disable_metric_extraction: false - version: "0" - retain_initial_value_of_delta_metric: false - eks_fargate_container_insights_enabled: false - certificate_file_path: "" - endpoint: "https://fake_endpoint" - "imds_retries": 1 - enhanced_container_insights: false - local_mode: false - log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus - log_retention: 0 - log_stream_name: '{JobName}' - max_retries: 2 - metric_descriptors: - - metric_name: "nginx_request_count" - overwrite: false - unit: "Count" - metric_declarations: - - dimensions: - - - Service - label_matchers: - - label_names: - - Service - regex: nginx.* - separator: ; - metric_name_selectors: - - ^nginx_request_count$ - - dimensions: [] - label_matchers: - - label_names: - - Namespace - regex: default - separator: ; - metric_name_selectors: - - .* - namespace: CustomizedNamespace - no_verify_ssl: false - num_workers: 8 - output_destination: cloudwatch - parse_json_encoded_attr_values: [] - proxy_address: "" - region: us-east-1 - request_timeout_seconds: 30 - resource_arn: "" - resource_to_telemetry_conversion: - enabled: true - role_arn: "" - profile: "" - shared_credentials_file: [ ] - -extensions: {} - + awsemf/prometheus: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/ecs/containerinsights/TestCluster/prometheus + log_retention: 0 + log_stream_name: '{JobName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - Service + label_matchers: + - label_names: + - Service + regex: nginx.* + separator: ; + metric_name_selectors: + - ^nginx_request_count$ + - dimensions: [] + label_matchers: + - 
label_names: + - Namespace + regex: default + separator: ; + metric_name_selectors: + - .* + metric_descriptors: + - metric_name: nginx_request_count + overwrite: false + unit: Count + middleware: agenthealth/logs + namespace: CustomizedNamespace + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: [] + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents processors: - batch/prometheus: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 5s - + batch/prometheus: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s receivers: - telegraf_prometheus: - collection_interval: 1m0s - initial_delay: "1s" - timeout: 0s - + telegraf_prometheus: + collection_interval: 1m0s + initial_delay: 1s + timeout: 0s service: - extensions: [] - pipelines: - metrics/prometheus: - exporters: - - awsemf/prometheus - processors: - - batch/prometheus - receivers: - - telegraf_prometheus - - telemetry: + extensions: + - agenthealth/logs + pipelines: + metrics/prometheus: + exporters: + - awsemf/prometheus + processors: + - batch/prometheus + receivers: + - telegraf_prometheus + telemetry: logs: development: false disable_caller: false diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index 28b536860d..e0e45370b0 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s 
max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -34,26 +42,27 @@ processors: receivers: telegraf_cpu: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_swap: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -82,7 +91,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 4d5f36b549..4cbb7adaf7 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -4,13 +4,21 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent profile: AmazonCloudWatchAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true shared_credential_file: fake-path 
-extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -32,33 +40,34 @@ processors: - ImageId - InstanceId - InstanceType + imds_retries: 2 profile: AmazonCloudWatchAgent refresh_interval_seconds: 0s shared_credential_file: fake-path - imds_retries: 2 receivers: telegraf_cpu: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_disk: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_diskio: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_mem: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_swap: collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -87,7 +96,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index ad93d6290d..2b4c5b495a 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -4,11 +4,19 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: 
@@ -20,32 +28,33 @@ processors: refresh_interval_seconds: 0s receivers: telegraf_win_perf_counters/1492679118: - alias_name: Memory + alias_name: Memory collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3446270237: - alias_name: PhysicalDisk + alias_name: PhysicalDisk collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3610923661: - alias_name: Paging File + alias_name: Paging File collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3762679655: - alias_name: Processor + alias_name: Processor collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/4283769065: - alias_name: LogicalDisk + alias_name: LogicalDisk collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -53,11 +62,11 @@ service: processors: - ec2tagger receivers: - - telegraf_win_perf_counters/1492679118 - - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 + - telegraf_win_perf_counters/1492679118 telemetry: logs: development: false @@ -67,7 +76,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log] + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 15e01ad980..15aea9445c 100644 --- 
a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -4,13 +4,21 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent profile: AmazonCloudWatchAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true shared_credential_file: fake-path -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: ec2tagger: ec2_instance_tag_keys: @@ -19,38 +27,39 @@ processors: - ImageId - InstanceId - InstanceType + imds_retries: 2 profile: AmazonCloudWatchAgent refresh_interval_seconds: 0s shared_credential_file: fake-path - imds_retries: 2 receivers: telegraf_win_perf_counters/1492679118: - alias_name: Memory + alias_name: Memory collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3446270237: - alias_name: PhysicalDisk + alias_name: PhysicalDisk collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3610923661: - alias_name: Paging File + alias_name: Paging File collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/3762679655: - alias_name: Processor + alias_name: Processor collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s telegraf_win_perf_counters/4283769065: - alias_name: LogicalDisk + alias_name: LogicalDisk collection_interval: 1m0s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -58,11 +67,11 @@ service: processors: - ec2tagger receivers: - - telegraf_win_perf_counters/1492679118 - - telegraf_win_perf_counters/3446270237 - 
telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 + - telegraf_win_perf_counters/1492679118 telemetry: logs: development: false @@ -72,7 +81,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log] + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 8440d99174..5aee89148b 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -1,23 +1,31 @@ connectors: {} - exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: {} receivers: telegraf_statsd: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -34,7 +42,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 @@ -45,4 +54,4 @@ service: resource: {} traces: processors: [] - propagators: [] \ No newline at end of file + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml 
b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index a64e21b8a7..ac6dbd3c57 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -4,19 +4,28 @@ exporters: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 + middleware: agenthealth/metrics + mode: EC2 namespace: CWAgent region: us-west-2 + region_type: ACJ resource_to_telemetry_conversion: enabled: true -extensions: {} +extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData processors: {} receivers: telegraf_statsd: collection_interval: 10s - initial_delay: "1s" + initial_delay: 1s timeout: 0s service: - extensions: [] + extensions: + - agenthealth/metrics pipelines: metrics/host: exporters: @@ -33,7 +42,8 @@ service: error_output_paths: [] initial_fields: {} level: info - output_paths: [c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log] + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log sampling: initial: 2 thereafter: 500 diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index 8299964997..6f9d150a5b 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -1,99 +1,107 @@ connectors: {} exporters: - awsxray: - aws_log_groups: [] - certificate_file_path: "" - endpoint: "" - imds_retries: 2 - index_all_attributes: false - indexed_attributes: [] - local_mode: false - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - proxy_address: "" - region: us-west-2 - request_timeout_seconds: 30 - resource_arn: "" - role_arn: "" - profile: "default" - shared_credentials_file: [ "/root/.aws/credentials" ] - telemetry: - enabled: true - include_metadata: true -extensions: {} + awsxray: + aws_log_groups: 
[] + certificate_file_path: "" + endpoint: "" + imds_retries: 2 + index_all_attributes: false + indexed_attributes: [] + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: default + proxy_address: "" + region: us-west-2 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + telemetry: + enabled: true + include_metadata: true +extensions: + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments processors: - batch/xray: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 200ms + batch/xray: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 200ms receivers: - awsxray: - endpoint: 127.0.0.1:2000 - proxy_server: - aws_endpoint: "" - endpoint: 127.0.0.1:2000 - local_mode: false - proxy_address: "" - region: us-west-2 - role_arn: "" - transport: udp - otlp: - protocols: - grpc: - auth: null - endpoint: 127.0.0.1:4317 - include_metadata: false - keepalive: null - max_concurrent_streams: 0 - max_recv_msg_size_mib: 0 - read_buffer_size: 524288 - tls: null - transport: tcp - write_buffer_size: 0 - http: - auth: null - cors: null - endpoint: 127.0.0.1:4318 - include_metadata: false - logs_url_path: /v1/logs - max_request_body_size: 0 - metrics_url_path: /v1/metrics - response_headers: {} - tls: null - traces_url_path: /v1/traces + awsxray: + endpoint: 127.0.0.1:2000 + proxy_server: + aws_endpoint: "" + endpoint: 127.0.0.1:2000 + local_mode: false + proxy_address: "" + region: us-west-2 + role_arn: "" + transport: udp + otlp: + protocols: + grpc: + auth: null + endpoint: 127.0.0.1:4317 + include_metadata: false + keepalive: null + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: null + transport: tcp + write_buffer_size: 0 + http: + auth: null + 
cors: null + endpoint: 127.0.0.1:4318 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + response_headers: {} + tls: null + traces_url_path: /v1/traces service: - extensions: [] - pipelines: - traces/xray: - exporters: - - awsxray - processors: - - batch/xray - receivers: - - awsxray - - otlp - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: - - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/traces + pipelines: + traces/xray: + exporters: + - awsxray + processors: + - batch/xray + receivers: + - awsxray + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index 8b6738d027..7f551dae65 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -1,99 +1,107 @@ connectors: {} exporters: - awsxray: - aws_log_groups: [] - certificate_file_path: "" - endpoint: "" - imds_retries: 2 - index_all_attributes: false - indexed_attributes: [] - local_mode: false - max_retries: 2 - no_verify_ssl: false - num_workers: 8 - proxy_address: "" - region: us-west-2 - request_timeout_seconds: 30 - 
resource_arn: "" - role_arn: "" - profile: "default" - shared_credentials_file: [ "/root/.aws/credentials" ] - telemetry: - enabled: true - include_metadata: true -extensions: {} + awsxray: + aws_log_groups: [] + certificate_file_path: "" + endpoint: "" + imds_retries: 2 + index_all_attributes: false + indexed_attributes: [] + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: default + proxy_address: "" + region: us-west-2 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + telemetry: + enabled: true + include_metadata: true +extensions: + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments processors: - batch/xray: - metadata_cardinality_limit: 1000 - metadata_keys: [] - send_batch_max_size: 0 - send_batch_size: 8192 - timeout: 200ms + batch/xray: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 200ms receivers: - awsxray: - endpoint: 127.0.0.1:2000 - proxy_server: - aws_endpoint: "" - endpoint: 127.0.0.1:2000 - local_mode: false - proxy_address: "" - region: us-west-2 - role_arn: "" - transport: udp - otlp: - protocols: - grpc: - auth: null - endpoint: 127.0.0.1:4317 - include_metadata: false - keepalive: null - max_concurrent_streams: 0 - max_recv_msg_size_mib: 0 - read_buffer_size: 524288 - tls: null - transport: tcp - write_buffer_size: 0 - http: - auth: null - cors: null - endpoint: 127.0.0.1:4318 - include_metadata: false - logs_url_path: /v1/logs - max_request_body_size: 0 - metrics_url_path: /v1/metrics - response_headers: {} - tls: null - traces_url_path: /v1/traces + awsxray: + endpoint: 127.0.0.1:2000 + proxy_server: + aws_endpoint: "" + endpoint: 127.0.0.1:2000 + local_mode: false + proxy_address: "" + region: us-west-2 + role_arn: "" + transport: udp + otlp: + protocols: + grpc: + auth: null + endpoint: 
127.0.0.1:4317 + include_metadata: false + keepalive: null + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: null + transport: tcp + write_buffer_size: 0 + http: + auth: null + cors: null + endpoint: 127.0.0.1:4318 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + response_headers: {} + tls: null + traces_url_path: /v1/traces service: - extensions: [] - pipelines: - traces/xray: - exporters: - - awsxray - processors: - - batch/xray - receivers: - - awsxray - - otlp - telemetry: - logs: - development: false - disable_caller: false - disable_stacktrace: false - encoding: console - error_output_paths: [] - initial_fields: {} - level: info - output_paths: - - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log - sampling: - initial: 2 - thereafter: 500 - metrics: - address: "" - level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + extensions: + - agenthealth/traces + pipelines: + traces/xray: + exporters: + - awsxray + processors: + - batch/xray + receivers: + - awsxray + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index 816824aef8..aeeab33ce3 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -134,6 +134,7 @@ func TestStatsDConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + 
context.CurrentContext().SetMode(config.ModeEC2) checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) } @@ -142,6 +143,7 @@ func TestStatsDConfig(t *testing.T) { // Linux only for CollectD func TestCollectDConfig(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "collectd_config_linux", "linux", expectedEnvVars, "") checkTranslation(t, "collectd_config_linux", "darwin", nil, "") @@ -195,6 +197,7 @@ func TestBasicConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) } @@ -202,6 +205,7 @@ func TestBasicConfig(t *testing.T) { func TestInvalidInputConfig(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "invalid_input_linux", "linux", expectedEnvVars, "") } @@ -232,6 +236,7 @@ func TestStandardConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) t.Setenv(envconfig.IMDS_NUMBER_RETRY, "0") checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) @@ -262,6 +267,7 @@ func TestAdvancedConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) } @@ -414,6 +420,7 @@ func TestStandardConfigWithCommonConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + 
context.CurrentContext().SetMode(config.ModeEC2) readCommonConfig(t, "./sampleConfig/commonConfig/withCredentialsProxySsl.toml") checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) @@ -422,6 +429,7 @@ func TestStandardConfigWithCommonConfig(t *testing.T) { func TestDeltaNetConfigLinux(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "delta_net_config_linux", "linux", expectedEnvVars, "") checkTranslation(t, "delta_net_config_linux", "darwin", nil, "") @@ -445,6 +453,7 @@ func TestLogFilterConfig(t *testing.T) { func TestIgnoreInvalidAppendDimensions(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "ignore_append_dimensions", "linux", expectedEnvVars, "") } @@ -499,8 +508,8 @@ func readCommonConfig(t *testing.T, commonConfigFilePath string) { func resetContext(t *testing.T) { t.Setenv(envconfig.IMDS_NUMBER_RETRY, strconv.Itoa(retryer.DefaultImdsRetries)) - util.DetectRegion = func(string, map[string]string) string { - return "us-west-2" + util.DetectRegion = func(string, map[string]string) (string, string) { + return "us-west-2", "ACJ" } util.DetectCredentialsPath = func() string { return "fake-path" diff --git a/translator/tocwconfig/tocwconfig_unix_test.go b/translator/tocwconfig/tocwconfig_unix_test.go index cd81848570..0d57e53b2d 100644 --- a/translator/tocwconfig/tocwconfig_unix_test.go +++ b/translator/tocwconfig/tocwconfig_unix_test.go @@ -6,10 +6,16 @@ package tocwconfig -import "testing" +import ( + "testing" + + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" +) func TestCompleteConfigUnix(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{ "CWAGENT_USER_AGENT": "CUSTOM 
USER AGENT VALUE", "CWAGENT_LOG_LEVEL": "DEBUG", @@ -24,6 +30,7 @@ func TestCompleteConfigUnix(t *testing.T) { func TestDeltaConfigLinux(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "delta_config_linux", "linux", expectedEnvVars, "") checkTranslation(t, "delta_config_linux", "darwin", nil, "") @@ -31,6 +38,7 @@ func TestDeltaConfigLinux(t *testing.T) { func TestDropOriginConfig(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "drop_origin_linux", "linux", expectedEnvVars, "") } diff --git a/translator/tocwconfig/tocwconfig_windows_test.go b/translator/tocwconfig/tocwconfig_windows_test.go index 7e074def52..166499a850 100644 --- a/translator/tocwconfig/tocwconfig_windows_test.go +++ b/translator/tocwconfig/tocwconfig_windows_test.go @@ -6,10 +6,16 @@ package tocwconfig -import "testing" +import ( + "testing" + + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" +) func TestCompleteConfigWindows(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{ "CWAGENT_USER_AGENT": "CUSTOM USER AGENT VALUE", "CWAGENT_LOG_LEVEL": "DEBUG", diff --git a/translator/translate/agent/agent.go b/translator/translate/agent/agent.go index 2951f4ab81..dad8a19392 100644 --- a/translator/translate/agent/agent.go +++ b/translator/translate/agent/agent.go @@ -13,6 +13,7 @@ var ChildRule = map[string]translator.Rule{} const ( SectionKey = "agent" + Mode = "mode" ) func GetCurPath() string { @@ -27,6 +28,8 @@ type Agent struct { Interval string Credentials map[string]interface{} Region string + RegionType string + Mode string Internal bool Role_arn string } diff --git a/translator/translate/agent/ruleRegion.go b/translator/translate/agent/ruleRegion.go index 
aee69f4df8..dddfd32bba 100644 --- a/translator/translate/agent/ruleRegion.go +++ b/translator/translate/agent/ruleRegion.go @@ -6,6 +6,7 @@ package agent import ( "fmt" + "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/util" @@ -15,20 +16,21 @@ type Region struct { } const ( - RegionKey = "region" + RegionKey = "region" + RegionType = "region_type" ) // This region will be provided to the corresponding input and output plugins // This should be applied before interpreting other component. func (r *Region) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - var region string ctx := context.CurrentContext() _, inputRegion := translator.DefaultCase(RegionKey, "", input) if inputRegion != "" { Global_Config.Region = inputRegion.(string) + Global_Config.RegionType = agentinfo.AgentConfigJson return } - region = util.DetectRegion(ctx.Mode(), ctx.Credentials()) + region, regionType := util.DetectRegion(ctx.Mode(), ctx.Credentials()) if region == "" { translator.AddErrorMessages(GetCurPath()+"ruleRegion/", fmt.Sprintf("Region info is missing for mode: %s", @@ -36,6 +38,7 @@ func (r *Region) ApplyRule(input interface{}) (returnKey string, returnVal inter } Global_Config.Region = region + Global_Config.RegionType = regionType return } diff --git a/translator/translate/logs/logs_test.go b/translator/translate/logs/logs_test.go index e0bed36ef1..923386164f 100644 --- a/translator/translate/logs/logs_test.go +++ b/translator/translate/logs/logs_test.go @@ -19,6 +19,7 @@ import ( func TestLogs(t *testing.T) { l := new(Logs) agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" var input interface{} err := json.Unmarshal([]byte(`{"logs":{"log_stream_name":"LOG_STREAM_NAME"}}`), &input) @@ -32,6 +33,8 @@ func TestLogs(t *testing.T) { "cloudwatchlogs": 
[]interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "", "log_stream_name": "LOG_STREAM_NAME", "force_flush_interval": "5s", }, @@ -44,6 +47,7 @@ func TestLogs(t *testing.T) { func TestLogs_LogStreamName(t *testing.T) { l := new(Logs) agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" var input interface{} err := json.Unmarshal([]byte(`{"logs":{}}`), &input) @@ -61,6 +65,8 @@ func TestLogs_LogStreamName(t *testing.T) { "cloudwatchlogs": []interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "OP", "log_stream_name": hostname, "force_flush_interval": "5s", }, @@ -86,6 +92,8 @@ func TestLogs_LogStreamName(t *testing.T) { "cloudwatchlogs": []interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "", "log_stream_name": "arn_aws_ecs_us-east-2_012345678910_task/cluster-name/9781c248-0edd-4cdb-9a93-f63cb662a5d3", "force_flush_interval": "5s", }, @@ -108,6 +116,8 @@ func TestLogs_LogStreamName(t *testing.T) { "cloudwatchlogs": []interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "", "log_stream_name": "demo-app-5ffc89b95c-jgnf6", "force_flush_interval": "5s", }, @@ -123,6 +133,7 @@ func TestLogs_LogStreamName(t *testing.T) { func TestLogs_ForceFlushInterval(t *testing.T) { l := new(Logs) agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" var input interface{} err := json.Unmarshal([]byte(`{"logs":{"force_flush_interval":10}}`), &input) @@ -140,6 +151,8 @@ func TestLogs_ForceFlushInterval(t *testing.T) { "cloudwatchlogs": []interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "OP", "log_stream_name": hostname, "force_flush_interval": "10s", }, @@ -155,6 +168,7 @@ func TestLogs_ForceFlushInterval(t *testing.T) { func TestLogs_EndpointOverride(t *testing.T) { l := new(Logs) agent.Global_Config.Region = 
"us-east-1" + agent.Global_Config.RegionType = "any" var input interface{} err := json.Unmarshal([]byte(`{"logs":{"endpoint_override":"https://logs-fips.us-east-1.amazonaws.com"}}`), &input) @@ -172,6 +186,8 @@ func TestLogs_EndpointOverride(t *testing.T) { "cloudwatchlogs": []interface{}{ map[string]interface{}{ "region": "us-east-1", + "region_type": "any", + "mode": "OP", "endpoint_override": "https://logs-fips.us-east-1.amazonaws.com", "log_stream_name": hostname, "force_flush_interval": "5s", diff --git a/translator/translate/logs/ruleBasicLogConfig.go b/translator/translate/logs/ruleBasicLogConfig.go index 07a9d37d37..28a4c9224a 100644 --- a/translator/translate/logs/ruleBasicLogConfig.go +++ b/translator/translate/logs/ruleBasicLogConfig.go @@ -5,6 +5,7 @@ package logs import ( "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" ) @@ -16,6 +17,8 @@ func (f *BasicLogConfig) ApplyRule(input interface{}) (returnKey string, returnV // add creds cloudwatchlogsConfig = translator.MergeTwoUniqueMaps(cloudwatchlogsConfig, agent.Global_Config.Credentials) cloudwatchlogsConfig[agent.RegionKey] = agent.Global_Config.Region + cloudwatchlogsConfig[agent.RegionType] = agent.Global_Config.RegionType + cloudwatchlogsConfig[agent.Mode] = context.CurrentContext().ShortMode() returnKey = Output_Cloudwatch_Logs returnVal = cloudwatchlogsConfig diff --git a/translator/translate/otel/exporter/awscloudwatch/translator.go b/translator/translate/otel/exporter/awscloudwatch/translator.go index da497d0a37..839353a634 100644 --- a/translator/translate/otel/exporter/awscloudwatch/translator.go +++ b/translator/translate/otel/exporter/awscloudwatch/translator.go @@ -12,10 +12,12 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/metric" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" + 
"github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/config" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/rollup_dimensions" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" ) const ( @@ -57,6 +59,8 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { _ = credentials.Unmarshal(cfg) cfg.RoleARN = getRoleARN(conf) cfg.Region = agent.Global_Config.Region + cfg.RegionType = agent.Global_Config.RegionType + cfg.Mode = context.CurrentContext().ShortMode() if namespace, ok := common.GetString(conf, common.ConfigKey(common.MetricsKey, namespaceKey)); ok { cfg.Namespace = namespace } @@ -75,6 +79,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { if dropOriginalMetrics := getDropOriginalMetrics(conf); len(dropOriginalMetrics) != 0 { cfg.DropOriginalConfigs = dropOriginalMetrics } + cfg.MiddlewareID = &agenthealth.MetricsID return cfg, nil } @@ -144,21 +149,21 @@ func getDropOriginalMetrics(conf *confmap.Conf) map[string]bool { measurementCfgKey := common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, category, common.MeasurementKey) dropOriginalCfgKey := common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, category, common.DropOriginalMetricsKey) /* Drop original metrics does not support procstat since procstat can monitor multiple process - "procstat": [ - { - "exe": "W3SVC", - "measurement": [ - "pid_count" - ] - }, - { - "exe": "IISADMIN", - "measurement": [ - "pid_count" - ] - }] - Therefore, dropping the original metrics can conflict between these two processes (e.g customers can drop pid_count with the first - process but not the second process) + "procstat": [ + { + "exe": "W3SVC", + "measurement": [ + 
"pid_count" + ] + }, + { + "exe": "IISADMIN", + "measurement": [ + "pid_count" + ] + }] + Therefore, dropping the original metrics can conflict between these two processes (e.g customers can drop pid_count with the first + process but not the second process) */ if dropMetrics := common.GetArray[any](conf, dropOriginalCfgKey); dropMetrics != nil { for _, dropMetric := range dropMetrics { diff --git a/translator/translate/otel/exporter/awscloudwatch/translator_test.go b/translator/translate/otel/exporter/awscloudwatch/translator_test.go index aea52dc250..62924e995b 100644 --- a/translator/translate/otel/exporter/awscloudwatch/translator_test.go +++ b/translator/translate/otel/exporter/awscloudwatch/translator_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/confmap" @@ -147,21 +148,23 @@ func TestTranslator(t *testing.T) { require.NoError(t, err) gotCfg, ok := got.(*cloudwatch.Config) require.True(t, ok) - require.Equal(t, testCase.want.Namespace, gotCfg.Namespace) - require.Equal(t, testCase.want.Region, gotCfg.Region) - require.Equal(t, testCase.want.ForceFlushInterval, gotCfg.ForceFlushInterval) - require.Equal(t, testCase.want.RoleARN, gotCfg.RoleARN) - require.Equal(t, testCase.want.AccessKey, gotCfg.AccessKey) - require.Equal(t, testCase.want.SecretKey, gotCfg.SecretKey) - require.Equal(t, testCase.want.Token, gotCfg.Token) - require.Equal(t, testCase.want.Profile, gotCfg.Profile) - require.Equal(t, testCase.want.SharedCredentialFilename, gotCfg.SharedCredentialFilename) - require.Equal(t, testCase.want.MaxValuesPerDatum, gotCfg.MaxValuesPerDatum) - require.Equal(t, testCase.want.RollupDimensions, gotCfg.RollupDimensions) + assert.Equal(t, testCase.want.Namespace, gotCfg.Namespace) + assert.Equal(t, testCase.want.Region, gotCfg.Region) + assert.Equal(t, testCase.want.ForceFlushInterval, gotCfg.ForceFlushInterval) + assert.Equal(t, 
testCase.want.RoleARN, gotCfg.RoleARN) + assert.Equal(t, testCase.want.AccessKey, gotCfg.AccessKey) + assert.Equal(t, testCase.want.SecretKey, gotCfg.SecretKey) + assert.Equal(t, testCase.want.Token, gotCfg.Token) + assert.Equal(t, testCase.want.Profile, gotCfg.Profile) + assert.Equal(t, testCase.want.SharedCredentialFilename, gotCfg.SharedCredentialFilename) + assert.Equal(t, testCase.want.MaxValuesPerDatum, gotCfg.MaxValuesPerDatum) + assert.Equal(t, testCase.want.RollupDimensions, gotCfg.RollupDimensions) + assert.NotNil(t, gotCfg.MiddlewareID) + assert.Equal(t, "agenthealth/metrics", gotCfg.MiddlewareID.String()) if testCase.wantWindows != nil && runtime.GOOS == "windows" { - require.Equal(t, testCase.wantWindows.DropOriginalConfigs, gotCfg.DropOriginalConfigs) + assert.Equal(t, testCase.wantWindows.DropOriginalConfigs, gotCfg.DropOriginalConfigs) } else { - require.Equal(t, testCase.want.DropOriginalConfigs, gotCfg.DropOriginalConfigs) + assert.Equal(t, testCase.want.DropOriginalConfigs, gotCfg.DropOriginalConfigs) } } }) diff --git a/translator/translate/otel/exporter/awsemf/translator.go b/translator/translate/otel/exporter/awsemf/translator.go index b33881ed70..20c12cb2ae 100644 --- a/translator/translate/otel/exporter/awsemf/translator.go +++ b/translator/translate/otel/exporter/awsemf/translator.go @@ -18,6 +18,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" ) @@ -60,6 +61,7 @@ func (t *translator) ID() component.ID { // Translate creates an awsemf exporter config based on the input json config func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { cfg := 
t.factory.CreateDefaultConfig().(*awsemfexporter.Config) + cfg.MiddlewareID = &agenthealth.LogsID var defaultConfig string if isEcs(c) { @@ -93,6 +95,7 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { if credentialsFileKey, ok := agent.Global_Config.Credentials[agent.CredentialsFile_Key]; ok { cfg.AWSSessionSettings.SharedCredentialsFile = []string{fmt.Sprintf("%v", credentialsFileKey)} } + cfg.AWSSessionSettings.RoleARN = agent.Global_Config.Role_arn cfg.AWSSessionSettings.IMDSRetries = retryer.GetDefaultRetryNumber() if isEcs(c) { diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index fb76195e0c..2a4e0889f8 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -8,10 +8,12 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/confmap" legacytranslator "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" ) var nilSlice []string @@ -19,6 +21,8 @@ var nilMetricDescriptorsSlice []awsemfexporter.MetricDescriptor func TestTranslator(t *testing.T) { tt := NewTranslator() + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.Role_arn = "global_arn" require.EqualValues(t, "awsemf", tt.ID().String()) testCases := map[string]struct { env map[string]string @@ -663,18 +667,22 @@ func TestTranslator(t *testing.T) { require.NotNil(t, got) gotCfg, ok := got.(*awsemfexporter.Config) require.True(t, ok) - require.Equal(t, testCase.want["namespace"], gotCfg.Namespace) - require.Equal(t, testCase.want["log_group_name"], gotCfg.LogGroupName) - require.Equal(t, 
testCase.want["log_stream_name"], gotCfg.LogStreamName) - require.Equal(t, testCase.want["dimension_rollup_option"], gotCfg.DimensionRollupOption) - require.Equal(t, testCase.want["disable_metric_extraction"], gotCfg.DisableMetricExtraction) - require.Equal(t, testCase.want["enhanced_container_insights"], gotCfg.EnhancedContainerInsights) - require.Equal(t, testCase.want["parse_json_encoded_attr_values"], gotCfg.ParseJSONEncodedAttributeValues) - require.Equal(t, testCase.want["output_destination"], gotCfg.OutputDestination) - require.Equal(t, testCase.want["eks_fargate_container_insights_enabled"], gotCfg.EKSFargateContainerInsightsEnabled) - require.Equal(t, testCase.want["resource_to_telemetry_conversion"], gotCfg.ResourceToTelemetrySettings) - require.ElementsMatch(t, testCase.want["metric_declarations"], gotCfg.MetricDeclarations) - require.ElementsMatch(t, testCase.want["metric_descriptors"], gotCfg.MetricDescriptors) + assert.Equal(t, testCase.want["namespace"], gotCfg.Namespace) + assert.Equal(t, testCase.want["log_group_name"], gotCfg.LogGroupName) + assert.Equal(t, testCase.want["log_stream_name"], gotCfg.LogStreamName) + assert.Equal(t, testCase.want["dimension_rollup_option"], gotCfg.DimensionRollupOption) + assert.Equal(t, testCase.want["disable_metric_extraction"], gotCfg.DisableMetricExtraction) + assert.Equal(t, testCase.want["enhanced_container_insights"], gotCfg.EnhancedContainerInsights) + assert.Equal(t, testCase.want["parse_json_encoded_attr_values"], gotCfg.ParseJSONEncodedAttributeValues) + assert.Equal(t, testCase.want["output_destination"], gotCfg.OutputDestination) + assert.Equal(t, testCase.want["eks_fargate_container_insights_enabled"], gotCfg.EKSFargateContainerInsightsEnabled) + assert.Equal(t, testCase.want["resource_to_telemetry_conversion"], gotCfg.ResourceToTelemetrySettings) + assert.ElementsMatch(t, testCase.want["metric_declarations"], gotCfg.MetricDeclarations) + assert.ElementsMatch(t, testCase.want["metric_descriptors"], 
gotCfg.MetricDescriptors) + assert.Equal(t, "global_arn", gotCfg.RoleARN) + assert.Equal(t, "us-east-1", gotCfg.Region) + assert.NotNil(t, gotCfg.MiddlewareID) + assert.Equal(t, "agenthealth/logs", gotCfg.MiddlewareID.String()) } }) } diff --git a/translator/translate/otel/exporter/awsxray/testdata/config.yaml b/translator/translate/otel/exporter/awsxray/testdata/config.yaml index aca5e7f26c..5d27d316d3 100644 --- a/translator/translate/otel/exporter/awsxray/testdata/config.yaml +++ b/translator/translate/otel/exporter/awsxray/testdata/config.yaml @@ -9,4 +9,5 @@ imds_retries: 1 proxy_address: https://proxy.proxy.com telemetry: enabled: true - include_metadata: true \ No newline at end of file + include_metadata: true +middleware: agenthealth/traces \ No newline at end of file diff --git a/translator/translate/otel/exporter/awsxray/translator.go b/translator/translate/otel/exporter/awsxray/translator.go index 1b1eeeea35..a67404ecbb 100644 --- a/translator/translate/otel/exporter/awsxray/translator.go +++ b/translator/translate/otel/exporter/awsxray/translator.go @@ -14,6 +14,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" ) const ( @@ -84,6 +85,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.ProxyAddress = proxyAddress } cfg.AWSSessionSettings.IMDSRetries = retryer.GetDefaultRetryNumber() + cfg.MiddlewareID = &agenthealth.TracesID return cfg, nil } diff --git a/translator/translate/otel/exporter/awsxray/translator_test.go b/translator/translate/otel/exporter/awsxray/translator_test.go index 848c3d12bc..743362cc8f 100644 --- a/translator/translate/otel/exporter/awsxray/translator_test.go +++ b/translator/translate/otel/exporter/awsxray/translator_test.go @@ -45,6 +45,7 @@ 
func TestTranslator(t *testing.T) { "enabled": true, "include_metadata": true, }, + "middleware": "agenthealth/traces", }), }, "WithCompleteConfig": { diff --git a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go index 09cf02b61f..768f977596 100644 --- a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go +++ b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go @@ -19,6 +19,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" ) //go:embed aws_cloudwatch_logs_default.yaml @@ -49,6 +50,7 @@ func (t *translator) ID() component.ID { // Translate creates an awscloudwatchlogsexporter exporter config based on the input json config func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*awscloudwatchlogsexporter.Config) + cfg.MiddlewareID = &agenthealth.LogsID var defaultConfig string // Add more else if when otel supports log reading diff --git a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator_test.go b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator_test.go index d7f228070b..49d050a266 100644 --- a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator_test.go +++ b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/confmap" @@ -49,10 +50,12 @@ func TestTranslator(t *testing.T) { 
require.NotNil(t, got) gotCfg, ok := got.(*awscloudwatchlogsexporter.Config) require.True(t, ok) - require.Equal(t, testCase.want.LogGroupName, gotCfg.LogGroupName) - require.Equal(t, testCase.want.LogStreamName, gotCfg.LogStreamName) - require.Equal(t, testCase.want.RawLog, gotCfg.RawLog) - require.Equal(t, testCase.want.Region, gotCfg.Region) + assert.Equal(t, testCase.want.LogGroupName, gotCfg.LogGroupName) + assert.Equal(t, testCase.want.LogStreamName, gotCfg.LogStreamName) + assert.Equal(t, testCase.want.RawLog, gotCfg.RawLog) + assert.Equal(t, testCase.want.Region, gotCfg.Region) + assert.NotNil(t, gotCfg.MiddlewareID) + assert.Equal(t, "agenthealth/logs", gotCfg.MiddlewareID.String()) } }) } diff --git a/translator/translate/otel/extension/agenthealth/translator.go b/translator/translate/otel/extension/agenthealth/translator.go new file mode 100644 index 0000000000..bfe2fb963f --- /dev/null +++ b/translator/translate/otel/extension/agenthealth/translator.go @@ -0,0 +1,62 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +const ( + OperationPutMetricData = "PutMetricData" + OperationPutLogEvents = "PutLogEvents" + OperationPutTraceSegments = "PutTraceSegments" + + usageDataKey = "usage_data" +) + +var ( + MetricsID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeMetrics)) + LogsID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeLogs)) + TracesID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeTraces)) +) + +type translator struct { + name string + operations []string + isUsageDataEnabled bool + factory extension.Factory +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator(name component.DataType, operations []string) common.Translator[component.Config] { + return &translator{ + name: string(name), + operations: operations, + factory: agenthealth.NewFactory(), + isUsageDataEnabled: envconfig.IsUsageDataEnabled(), + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +// Translate creates an extension configuration. 
+func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*agenthealth.Config) + cfg.IsUsageDataEnabled = t.isUsageDataEnabled + if usageData, ok := common.GetBool(conf, common.ConfigKey(common.AgentKey, usageDataKey)); ok { + cfg.IsUsageDataEnabled = cfg.IsUsageDataEnabled && usageData + } + cfg.Stats = agent.StatsConfig{Operations: t.operations} + return cfg, nil +} diff --git a/translator/translate/otel/extension/agenthealth/translator_test.go b/translator/translate/otel/extension/agenthealth/translator_test.go new file mode 100644 index 0000000000..f0febf174b --- /dev/null +++ b/translator/translate/otel/extension/agenthealth/translator_test.go @@ -0,0 +1,75 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package agenthealth + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" +) + +func TestTranslate(t *testing.T) { + operations := []string{OperationPutLogEvents} + testCases := map[string]struct { + input map[string]interface{} + isEnvUsageData bool + want *agenthealth.Config + }{ + "WithUsageData/NotInConfig": { + input: map[string]interface{}{"agent": map[string]interface{}{}}, + isEnvUsageData: true, + want: &agenthealth.Config{ + IsUsageDataEnabled: true, + Stats: agent.StatsConfig{ + Operations: operations, + }, + }, + }, + "WithUsageData/FalseInConfig": { + input: map[string]interface{}{"agent": map[string]interface{}{"usage_data": false}}, + isEnvUsageData: true, + want: &agenthealth.Config{ + IsUsageDataEnabled: false, + Stats: agent.StatsConfig{ + Operations: operations, + }, + }, + }, + "WithUsageData/FalseInEnv": { + input: map[string]interface{}{"agent": map[string]interface{}{"usage_data": true}}, + isEnvUsageData: false, 
+ want: &agenthealth.Config{ + IsUsageDataEnabled: false, + Stats: agent.StatsConfig{ + Operations: operations, + }, + }, + }, + "WithUsageData/BothTrue": { + input: map[string]interface{}{"agent": map[string]interface{}{"usage_data": true}}, + isEnvUsageData: true, + want: &agenthealth.Config{ + IsUsageDataEnabled: true, + Stats: agent.StatsConfig{ + Operations: operations, + }, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + tt := NewTranslator("test", operations).(*translator) + assert.Equal(t, "agenthealth/test", tt.ID().String()) + tt.isUsageDataEnabled = testCase.isEnvUsageData + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } +} diff --git a/translator/translate/otel/pipeline/containerinsights/translator.go b/translator/translate/otel/pipeline/containerinsights/translator.go index 6122ea8dd5..ba5fb093e2 100644 --- a/translator/translate/otel/pipeline/containerinsights/translator.go +++ b/translator/translate/otel/pipeline/containerinsights/translator.go @@ -11,6 +11,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricstransformprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" @@ -53,6 +54,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Receivers: common.NewTranslatorMap(awscontainerinsight.NewTranslator()), Processors: common.NewTranslatorMap(metricstransformprocessor.NewTranslatorWithName(pipelineName), 
batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey)), // EKS & ECS CI sit under metrics_collected in "logs" Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil } @@ -60,5 +62,6 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Receivers: common.NewTranslatorMap(awscontainerinsight.NewTranslator()), Processors: common.NewTranslatorMap(batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey)), // EKS & ECS CI sit under metrics_collected in "logs" Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil } diff --git a/translator/translate/otel/pipeline/containerinsights/translator_test.go b/translator/translate/otel/pipeline/containerinsights/translator_test.go index 9ea15de298..14a721e6b2 100644 --- a/translator/translate/otel/pipeline/containerinsights/translator_test.go +++ b/translator/translate/otel/pipeline/containerinsights/translator_test.go @@ -22,6 +22,7 @@ func TestTranslator(t *testing.T) { receivers []string processors []string exporters []string + extensions []string } cit := NewTranslator() require.EqualValues(t, "metrics/containerinsights", cit.ID().String()) @@ -47,6 +48,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"awscontainerinsightreceiver"}, processors: []string{"batch/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, + extensions: []string{"agenthealth/logs"}, }, }, "WithKubernetesKey": { @@ -62,6 +64,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"awscontainerinsightreceiver"}, processors: []string{"batch/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, + extensions: 
[]string{"agenthealth/logs"}, }, }, "WithKubernetes/WithEnhancedContainerInsights": { @@ -80,6 +83,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"awscontainerinsightreceiver"}, processors: []string{"metricstransform/containerinsights", "batch/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, + extensions: []string{"agenthealth/logs"}, }, }, } @@ -95,6 +99,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) } }) } diff --git a/translator/translate/otel/pipeline/emf_logs/translator.go b/translator/translate/otel/pipeline/emf_logs/translator.go index f73224efd1..433e44f17b 100644 --- a/translator/translate/otel/pipeline/emf_logs/translator.go +++ b/translator/translate/otel/pipeline/emf_logs/translator.go @@ -11,6 +11,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/otel_aws_cloudwatch_logs" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/tcp_logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/udp_logs" @@ -49,6 +50,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Receivers: common.NewTranslatorMap[component.Config](), Processors: common.NewTranslatorMap(batchprocessor.NewTranslatorWithNameAndSection(common.PipelineNameEmfLogs, 
common.LogsKey)), // EMF logs sit under metrics_collected in "logs" Exporters: common.NewTranslatorMap(otel_aws_cloudwatch_logs.NewTranslatorWithName(common.PipelineNameEmfLogs)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), } if serviceAddress, ok := common.GetString(conf, serviceAddressEMFKey); ok { if strings.Contains(serviceAddress, common.Udp) { diff --git a/translator/translate/otel/pipeline/emf_logs/translator_test.go b/translator/translate/otel/pipeline/emf_logs/translator_test.go index f8ff28e2e6..f3efe38b8b 100644 --- a/translator/translate/otel/pipeline/emf_logs/translator_test.go +++ b/translator/translate/otel/pipeline/emf_logs/translator_test.go @@ -22,6 +22,7 @@ func TestTranslator(t *testing.T) { receivers []string processors []string exporters []string + extensions []string } cit := NewTranslator() require.EqualValues(t, "logs/emf_logs", cit.ID().String()) @@ -47,6 +48,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"tcplog/emf_logs", "udplog/emf_logs"}, processors: []string{"batch/emf_logs"}, exporters: []string{"awscloudwatchlogs/emf_logs"}, + extensions: []string{"agenthealth/logs"}, }, }, "WithStructuredLogKey": { @@ -62,6 +64,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"tcplog/emf_logs", "udplog/emf_logs"}, processors: []string{"batch/emf_logs"}, exporters: []string{"awscloudwatchlogs/emf_logs"}, + extensions: []string{"agenthealth/logs"}, }, }, "WithUdpServiceAddress": { @@ -79,6 +82,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"udplog/emf_logs"}, processors: []string{"batch/emf_logs"}, exporters: []string{"awscloudwatchlogs/emf_logs"}, + extensions: []string{"agenthealth/logs"}, }, }, "WithTcpServiceAddress": { @@ -96,6 +100,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"tcplog/emf_logs"}, processors: []string{"batch/emf_logs"}, exporters: []string{"awscloudwatchlogs/emf_logs"}, + extensions: 
[]string{"agenthealth/logs"}, }, }, } @@ -111,6 +116,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) } }) } diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 41ca5f57d9..4b126341fe 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -11,6 +11,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awscloudwatch" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/cumulativetodeltaprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/ec2taggerprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricsdecorator" @@ -50,6 +51,7 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, Receivers: t.receivers, Processors: common.NewTranslatorMap[component.Config](), Exporters: common.NewTranslatorMap(awscloudwatch.NewTranslator()), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData})), } // we need to add delta processor because (only) diskio and net input plugins report delta metric diff --git a/translator/translate/otel/pipeline/host/translator_test.go 
b/translator/translate/otel/pipeline/host/translator_test.go index 14a7fd33f8..b91400e53f 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -35,6 +35,7 @@ func TestTranslator(t *testing.T) { receivers []string processors []string exporters []string + extensions []string } testCases := map[string]struct { input map[string]interface{} @@ -60,6 +61,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{}, exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, }, }, "WithMetricsKeyNet": { @@ -76,6 +78,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{"cumulativetodelta/hostDeltaMetrics"}, exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, }, }, "WithMetricDecoration": { @@ -99,6 +102,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{"transform"}, exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, }, }, "WithoutMetricDecoration": { @@ -119,6 +123,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{}, exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, }, }, } @@ -139,6 +144,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) } }) } diff --git a/translator/translate/otel/pipeline/prometheus/translator.go 
b/translator/translate/otel/pipeline/prometheus/translator.go index 545a75aea4..2943273c59 100644 --- a/translator/translate/otel/pipeline/prometheus/translator.go +++ b/translator/translate/otel/pipeline/prometheus/translator.go @@ -12,6 +12,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/metrics_collected/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/adapter" ) @@ -45,6 +46,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Processors: common.NewTranslatorMap( batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" ), - Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), + Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil } diff --git a/translator/translate/otel/pipeline/prometheus/translator_test.go b/translator/translate/otel/pipeline/prometheus/translator_test.go index bd444c8a54..b83aa7eea0 100644 --- a/translator/translate/otel/pipeline/prometheus/translator_test.go +++ b/translator/translate/otel/pipeline/prometheus/translator_test.go @@ -45,6 +45,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"telegraf_prometheus"}, processors: []string{"batch/prometheus"}, exporters: []string{"awsemf/prometheus"}, + extensions: []string{"agenthealth/logs"}, }, }, } @@ -60,6 +61,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, 
testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) } }) } diff --git a/translator/translate/otel/pipeline/xray/translator.go b/translator/translate/otel/pipeline/xray/translator.go index 40bcb1430a..a3b20ae904 100644 --- a/translator/translate/otel/pipeline/xray/translator.go +++ b/translator/translate/otel/pipeline/xray/translator.go @@ -12,6 +12,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" awsxrayexporter "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsxray" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor" awsxrayreceiver "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awsxray" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" @@ -47,6 +48,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Receivers: common.NewTranslatorMap[component.Config](), Processors: common.NewTranslatorMap(processor.NewDefaultTranslatorWithName(pipelineName, batchprocessor.NewFactory())), Exporters: common.NewTranslatorMap(awsxrayexporter.NewTranslator()), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeTraces, []string{agenthealth.OperationPutTraceSegments})), } if conf.IsSet(xrayKey) { translators.Receivers.Set(awsxrayreceiver.NewTranslator()) diff --git a/translator/translate/otel/pipeline/xray/translator_test.go b/translator/translate/otel/pipeline/xray/translator_test.go index b27df5c545..450db8ff54 100644 --- 
a/translator/translate/otel/pipeline/xray/translator_test.go +++ b/translator/translate/otel/pipeline/xray/translator_test.go @@ -21,6 +21,7 @@ func TestTranslator(t *testing.T) { receivers []string processors []string exporters []string + extensions []string } tt := NewTranslator() assert.EqualValues(t, "traces/xray", tt.ID().String()) @@ -45,6 +46,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"awsxray"}, processors: []string{"batch/xray"}, exporters: []string{"awsxray"}, + extensions: []string{"agenthealth/traces"}, }, }, "WithOtlpKey": { @@ -59,6 +61,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"otlp"}, processors: []string{"batch/xray"}, exporters: []string{"awsxray"}, + extensions: []string{"agenthealth/traces"}, }, }, "WithXrayAndOtlpKey": { @@ -74,6 +77,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"awsxray", "otlp"}, processors: []string{"batch/xray"}, exporters: []string{"awsxray"}, + extensions: []string{"agenthealth/traces"}, }, }, } @@ -89,6 +93,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) } }) } diff --git a/translator/util/ec2util/ec2util.go b/translator/util/ec2util/ec2util.go index 6b3ce5e37b..5da0790d23 100644 --- a/translator/util/ec2util/ec2util.go +++ b/translator/util/ec2util/ec2util.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" - "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" 
"github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -116,7 +116,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { hostnameInner, errInner := mdEnableFallback.GetMetadata("hostname") if errInner == nil { e.Hostname = hostnameInner - agentinfo.SetImdsFallbackSucceed() + provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) } else { fmt.Println("E! [EC2] Fetch hostname from EC2 metadata fail:", errInner) } @@ -136,7 +136,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { e.AccountID = instanceIdentityDocumentInner.AccountID e.PrivateIP = instanceIdentityDocumentInner.PrivateIP e.InstanceID = instanceIdentityDocumentInner.InstanceID - agentinfo.SetImdsFallbackSucceed() + provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) } else { fmt.Println("E! [EC2] Fetch identity document from EC2 metadata fail:", errInner) } diff --git a/translator/util/sdkutil.go b/translator/util/sdkutil.go index 7d998e0c5b..de5d5a27f0 100644 --- a/translator/util/sdkutil.go +++ b/translator/util/sdkutil.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/amazon-cloudwatch-agent/cfg/commonconfig" + "github.com/aws/amazon-cloudwatch-agent/handlers/agentinfo" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/util/ec2util" @@ -97,19 +98,25 @@ func defaultECSRegion() string { return ecsutil.GetECSUtilSingleton().Region } -func detectRegion(mode string, credsConfig map[string]string) (region string) { +func detectRegion(mode string, credsConfig map[string]string) (region string, regionType string) { region = SDKRegionWithCredsMap(mode, credsConfig) + regionType = agentinfo.RegionNotFound + if region != "" { + regionType = agentinfo.CredsMap + } // For ec2, fallback to metadata when no 
region info found in credential profile. if region == "" && mode == config.ModeEC2 { fmt.Println("I! Trying to detect region from ec2") region = DefaultEC2Region() + regionType = agentinfo.EC2Metadata } // try to get region from ecs metadata if region == "" && mode == config.ModeEC2 { fmt.Println("I! Trying to detect region from ecs") region = DefaultECSRegion() + regionType = agentinfo.ECSMetadata } return From c91f04b801b00b1e626f0d66357ddc1aca7fa2fb Mon Sep 17 00:00:00 2001 From: Adam <90734270+adam-mateen@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:07:46 -0500 Subject: [PATCH 14/55] Delete canary test and soak test workflows. (#696) --- .github/workflows/deploy-canary.yml | 90 ----------------------------- .github/workflows/soak-test.yml | 88 ---------------------------- 2 files changed, 178 deletions(-) delete mode 100644 .github/workflows/deploy-canary.yml delete mode 100644 .github/workflows/soak-test.yml diff --git a/.github/workflows/deploy-canary.yml b/.github/workflows/deploy-canary.yml deleted file mode 100644 index 0bdebc8ac3..0000000000 --- a/.github/workflows/deploy-canary.yml +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: Deploy Canary -env: - TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} - KEY_NAME: ${{ secrets.KEY_NAME }} - PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} - CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" - CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" - CWA_GITHUB_TEST_REPO_BRANCH: "main" - -on: - schedule: - - cron: "0 15 * * *" # Run daily at 15:00 UTC - workflow_call: - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - cancel-in-progress: true - -jobs: - DeployCanary: - name: "DeployCanary" - runs-on: ubuntu-latest - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - - name: Terminate Last Canary - run: | - if aws s3api wait object-exists --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate ; - then - cd terraform/ec2/linux - aws s3 cp s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate . 
- terraform --version - terraform init - terraform destroy -auto-approve - aws s3api delete-object --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate - fi - - # @TODO we can add a matrix in the future but for alpha we will only deploy to al2 - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/ec2/linux - terraform init - if terraform apply --auto-approve \ - -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ - -var="user=ec2-user" \ - -var="ami=cloudwatch-agent-integration-test-al2*" \ - -var="arc=amd64" \ - -var="binary_name=amazon-cloudwatch-agent.rpm" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="test_name=canary" \ - -var="is_canary=true" \ - -var="test_dir=./test/canary" ; then aws s3 cp terraform.tfstate s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate - else - terraform destroy -auto-approve && exit 1 - fi - - #This is here just in case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/ec2/linux && terraform destroy --auto-approve diff --git a/.github/workflows/soak-test.yml b/.github/workflows/soak-test.yml deleted file mode 100644 index a6ddfdc557..0000000000 --- a/.github/workflows/soak-test.yml +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: Soak Test -env: - TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} - KEY_NAME: ${{ secrets.KEY_NAME }} - PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} - CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" - CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" - CWA_GITHUB_TEST_REPO_BRANCH: "main" - -on: - schedule: - # Run at midnight on Sunday (once a week) - - cron: "0 0 * * 0" - workflow_call: - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true - -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "cwagent-integration-test:${{ github.sha }}" - BucketKey: "integration-test/binary/${{ github.sha }}" - PackageBucketKey: "integration-test/binary/${{ github.sha }}" - - DeploySoakTest: - name: "DeploySoakTest" - needs: [BuildAndUpload] - runs-on: ubuntu-latest - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - # @TODO we can add a matrix in the future but for for now, we will only deploy to AL2. 
- - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/ec2/linux - terraform init - terraform apply --auto-approve \ - -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="user=ec2-user" \ - -var="ami=cloudwatch-agent-integration-test-al2*" \ - -var="arc=amd64" \ - -var="binary_name=amazon-cloudwatch-agent.rpm" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="test_name=SoakTest" \ - -var="test_dir=./test/soak -run TestSoakHigh" - - #This is here just in case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/ec2/linux && terraform destroy --auto-approve From c67752c3090619d61218d01082a609b835005e60 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 10:36:50 -0400 Subject: [PATCH 15/55] Automated sync with upstream - last commit eca8174758d95308006632ec4d5533d765db9ca8 - run #148.1 (#702) --- .github/actions/patch-dependencies/action.yml | 97 -- .github/workflows/PR-build.yml | 4 +- ...lication-signals-java-e2e-ec2-asg-test.yml | 168 +++ ...application-signals-java-e2e-ec2-test.yml} | 86 +- ...application-signals-java-e2e-eks-test.yml} | 98 +- ...cation-signals-python-e2e-ec2-asg-test.yml | 167 +++ ...pplication-signals-python-e2e-ec2-test.yml | 160 +++ ...pplication-signals-python-e2e-eks-test.yml | 317 +++++ .github/workflows/clean-aws-resources.yml | 41 +- .github/workflows/deploy-canary.yml | 88 ++ .github/workflows/ec2-integration-test.yml | 30 +- .github/workflows/integration-test.yml | 337 +++-- .github/workflows/nightly-build.yml | 26 
+ .github/workflows/otel-fork-replace.yml | 2 +- .github/workflows/soak-test.yml | 114 ++ .github/workflows/start-localstack.yml | 84 ++ .github/workflows/stop-localstack.yml | 63 + .github/workflows/test-build-docker.yml | 438 ++++++ .github/workflows/test-build-packages.yml | 182 +++ .github/workflows/test-build.yml | 305 +---- Makefile | 54 +- RELEASE_NOTES | 80 ++ .../localbin/Dockerfile | 19 +- .../localbin/Dockerfile.Windows | 22 + .../localbin/Dockerfile.dockerignore | 0 .../localmsi/Dockerfile.Windows | 32 + .../source/Dockerfile.Windows | 45 + cfg/aws/credentials.go | 4 +- cfg/envconfig/envconfig.go | 43 +- .../amazon-cloudwatch-agent.go | 13 +- cmd/config-translator/translator.go | 5 +- .../path_windows.go | 43 +- extension/agenthealth/factory.go | 4 +- .../agenthealth/handler/stats/agent/agent.go | 2 + .../agenthealth/handler/stats/agent/flag.go | 188 +++ .../handler/stats/agent/flag_test.go | 90 ++ .../agenthealth/handler/stats/handler.go | 1 + .../handler/stats/provider/flag.go | 90 +- .../handler/stats/provider/flag_test.go | 18 +- .../handler/stats/provider/interval_test.go | 1 + .../handler/stats/provider/process_test.go | 1 + .../handler/useragent/useragent.go | 16 +- .../handler/useragent/useragent_test.go | 31 +- go.mod | 439 +++--- go.sum | 1035 ++++++++------ internal/cloudwatch/unit.go | 110 +- internal/cloudwatch/unit_test.go | 44 +- internal/containerinsightscommon/const.go | 46 +- internal/containerinsightscommon/k8sconst.go | 3 + internal/containerinsightscommon/util.go | 22 +- internal/exec.go | 53 + internal/exec_unix.go | 69 + internal/exec_windows.go | 44 + internal/mapstructure/encoder.go | 286 ++++ internal/mapstructure/encoder_test.go | 416 ++++++ internal/mapstructure/marshaler.go | 71 + internal/mapstructure/marshaler_test.go | 99 ++ internal/util/testutil/testutil.go | 9 + internal/util/unit/prefix.go | 27 +- internal/util/unit/prefix_test.go | 26 +- .../dependencies/amazon-cloudwatch-agent-ctl | 2 +- 
plugins/inputs/logfile/fileconfig_test.go | 11 + plugins/inputs/logfile/logfile.go | 3 +- plugins/inputs/nvidia_smi/README.md | 155 +++ plugins/inputs/nvidia_smi/common/setters.go | 48 + plugins/inputs/nvidia_smi/nvidia_smi.go | 143 ++ plugins/inputs/nvidia_smi/nvidia_smi_test.go | 542 ++++++++ plugins/inputs/nvidia_smi/sample.conf | 15 + .../inputs/nvidia_smi/schema_v11/parser.go | 77 ++ plugins/inputs/nvidia_smi/schema_v11/types.go | 121 ++ .../inputs/nvidia_smi/schema_v12/parser.go | 113 ++ plugins/inputs/nvidia_smi/schema_v12/types.go | 291 ++++ .../nvidia_smi/testdata/a100-sxm4-v12.xml | 452 +++++++ plugins/inputs/nvidia_smi/testdata/a10g.xml | 355 +++++ .../nvidia_smi/testdata/gtx-1070-ti.xml | 47 + .../nvidia_smi/testdata/gtx-1660-ti.xml | 189 +++ .../nvidia_smi/testdata/quadro-p2000-v12.xml | 558 ++++++++ .../nvidia_smi/testdata/quadro-p400.xml | 447 +++++++ .../nvidia_smi/testdata/rtx-3080-v12.xml | 786 +++++++++++ .../inputs/nvidia_smi/testdata/tesla-t4.xml | 348 +++++ .../prometheus/metric_type_handler_test.go | 47 +- plugins/inputs/prometheus/metrics_receiver.go | 5 + .../inputs/prometheus/metrics_type_handler.go | 14 +- plugins/inputs/prometheus/start.go | 9 +- plugins/outputs/cloudwatch/cloudwatch.go | 3 - plugins/outputs/cloudwatch/cloudwatch_test.go | 3 +- plugins/outputs/cloudwatch/config.go | 2 - plugins/outputs/cloudwatch/factory.go | 7 +- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 5 +- plugins/outputs/cloudwatchlogs/pusher.go | 18 +- plugins/plugins.go | 2 +- .../README.md | 36 +- .../awsapplicationsignals/common/types.go | 43 + .../awsapplicationsignals/config/config.go | 77 ++ .../config/config_test.go | 8 +- .../config/resolvers.go | 11 + .../config/resolvers_test.go | 5 + .../factory.go | 22 +- .../factory_test.go | 26 +- .../internal/attributes/attributes.go | 24 + .../cardinalitycontrol/count_min_sketch.go | 98 ++ .../count_min_sketch_test.go | 94 ++ .../cardinalitycontrol/metrics_limiter.go | 418 ++++++ .../metrics_limiter_test.go | 
272 ++++ .../normalizer/attributesnormalizer.go | 234 ++++ .../normalizer/attributesnormalizer_test.go | 206 +++ .../internal/prune/metric_pruner.go | 42 + .../internal/prune/metric_pruner_test.go | 85 ++ .../internal/resolver/attributesresolver.go | 163 +++ .../resolver/attributesresolver_test.go | 247 ++++ .../internal/resolver/kubernetes.go | 77 +- .../internal/resolver/kubernetes_test.go | 247 +++- .../processor.go | 114 +- .../processor_test.go | 41 +- .../rules/common.go | 31 +- .../rules/common_test.go | 46 + .../rules/dropper.go | 0 .../rules/dropper_test.go | 0 .../rules/keeper.go | 17 +- .../rules/keeper_test.go | 6 +- .../rules/replacer.go | 29 +- .../rules/replacer_test.go | 79 +- .../testdata/config_eks.yaml | 2 +- .../testdata/config_generic.yaml | 2 +- .../processors/awsappsignals/config/config.go | 37 - .../internal/attributes/attributes.go | 29 - .../normalizer/attributesnormalizer.go | 109 -- .../normalizer/attributesnormalizer_test.go | 105 -- .../internal/resolver/attributesresolver.go | 98 -- .../resolver/attributesresolver_test.go | 106 -- .../awsappsignals/rules/common_test.go | 23 - plugins/processors/ec2tagger/config.go | 5 + plugins/processors/ec2tagger/constants.go | 1 - plugins/processors/ec2tagger/ebsvolume.go | 78 -- .../ec2tagger/ec2metadataprovider.go | 8 +- plugins/processors/ec2tagger/ec2tagger.go | 46 +- .../processors/ec2tagger/ec2tagger_test.go | 324 ++--- plugins/processors/ec2tagger/factory.go | 6 +- .../internal/volume/describevolumes.go | 51 + .../internal/volume/describevolumes_test.go | 77 ++ .../ec2tagger/internal/volume/host_linux.go | 73 + .../internal/volume/host_linux_test.go | 81 ++ .../internal/volume/host_nonlinux.go | 21 + .../internal/volume/host_nonlinux_test.go | 19 + .../ec2tagger/internal/volume/merge.go | 34 + .../ec2tagger/internal/volume/merge_test.go | 74 + .../ec2tagger/internal/volume/volume.go | 145 ++ .../ec2tagger/internal/volume/volume_test.go | 52 + .../ecsdecorator/ecsdecorator_test.go | 10 +- 
plugins/processors/ecsdecorator/metricRule.go | 4 +- .../ecsdecorator/metricRule_test.go | 6 +- plugins/processors/gpuattributes/config.go | 19 + .../processors/gpuattributes/config_test.go | 19 + plugins/processors/gpuattributes/factory.go | 56 + .../processors/gpuattributes/factory_test.go | 45 + .../awsneuron_memory_metric_aggregator.go | 92 ++ ...awsneuron_memory_metric_aggregator_test.go | 128 ++ .../internal/awsneuron_metric_modifier.go | 367 +++++ .../awsneuron_metric_modifier_test.go | 423 ++++++ plugins/processors/gpuattributes/processor.go | 252 ++++ .../gpuattributes/processor_test.go | 142 ++ .../structuredlogsadapter/metricruletagger.go | 4 +- .../metricruletagger_test.go | 8 +- receiver/adapter/config.go | 2 +- receiver/adapter/factory.go | 11 +- receiver/adapter/factory_test.go | 9 +- receiver/adapter/testdata/cpu_plugin.toml | 3 +- service/configprovider/provider.go | 4 +- service/configprovider/provider_test.go | 3 +- service/defaultcomponents/components.go | 6 +- service/defaultcomponents/components_test.go | 58 +- service/registry/registry_test.go | 14 +- tool/clean/clean_ebs/clean_ebs.go | 68 + tool/clean/clean_eks/clean_eks.go | 98 +- tool/clean/clean_host/clean_host.go | 8 +- tool/clean/clean_util.go | 1 + tool/paths/paths.go | 2 - tool/paths/paths_unix.go | 2 + tool/paths/paths_windows.go | 10 + translator/cmdutil/translatorutil.go | 38 +- translator/config/mode.go | 19 +- translator/config/schema.json | 157 ++- translator/context/context.go | 23 +- .../sampleConfig/advanced_config_darwin.yaml | 19 +- .../sampleConfig/advanced_config_linux.yaml | 19 +- .../sampleConfig/advanced_config_windows.yaml | 16 +- .../appsignals_and_eks_config.json | 17 +- .../appsignals_and_eks_config.yaml | 276 ++-- .../appsignals_and_k8s_config.json | 13 +- .../appsignals_and_k8s_config.yaml | 474 +++++-- .../appsignals_fallback_and_eks_config.conf | 27 + .../appsignals_fallback_and_eks_config.json | 34 + .../appsignals_fallback_and_eks_config.yaml | 698 ++++++++++ 
.../appsignals_over_fallback_config.conf | 27 + .../appsignals_over_fallback_config.json | 46 + .../appsignals_over_fallback_config.yaml | 698 ++++++++++ .../sampleConfig/base_appsignals_config.conf | 4 +- .../sampleConfig/base_appsignals_config.json | 5 +- .../sampleConfig/base_appsignals_config.yaml | 197 +-- .../base_appsignals_fallback_config.conf | 27 + .../base_appsignals_fallback_config.json | 18 + .../base_appsignals_fallback_config.yaml | 504 +++++++ .../base_container_insights_config.json | 3 +- .../base_container_insights_config.yaml | 38 +- .../sampleConfig/basic_config_linux.yaml | 16 +- .../sampleConfig/basic_config_windows.yaml | 16 +- .../sampleConfig/collectd_config_linux.yaml | 15 +- .../sampleConfig/complete_darwin_config.yaml | 81 +- .../sampleConfig/complete_linux_config.yaml | 71 +- .../sampleConfig/complete_windows_config.yaml | 56 +- .../sampleConfig/config_with_env.yaml | 23 +- .../sampleConfig/delta_config_linux.yaml | 19 +- .../sampleConfig/delta_net_config_linux.yaml | 20 +- .../sampleConfig/drop_origin_linux.yaml | 16 +- .../emf_and_kubernetes_config.json | 3 +- .../emf_and_kubernetes_config.yaml | 56 +- .../emf_and_kubernetes_with_gpu_config.conf | 27 + .../emf_and_kubernetes_with_gpu_config.json | 19 + .../emf_and_kubernetes_with_gpu_config.yaml | 1185 +++++++++++++++++ .../ignore_append_dimensions.yaml | 18 +- .../sampleConfig/invalid_input_linux.yaml | 16 +- .../kubernetes_on_prem_config.json | 3 +- .../kubernetes_on_prem_config.yaml | 42 +- .../sampleConfig/log_ecs_metric_only.yaml | 31 +- .../logs_and_kubernetes_config.json | 3 +- .../logs_and_kubernetes_config.yaml | 53 +- .../sampleConfig/prometheus_config_linux.yaml | 16 +- .../prometheus_config_windows.yaml | 16 +- .../sampleConfig/standard_config_linux.yaml | 19 +- ...ndard_config_linux_with_common_config.yaml | 19 +- .../sampleConfig/standard_config_windows.yaml | 16 +- ...ard_config_windows_with_common_config.yaml | 18 +- .../sampleConfig/statsd_config_linux.yaml | 15 +- 
.../sampleConfig/statsd_config_windows.yaml | 15 +- .../sampleConfig/trace_config_linux.yaml | 38 +- .../sampleConfig/trace_config_windows.yaml | 38 +- translator/tocwconfig/tocwconfig_test.go | 87 +- .../metrics/util/commonconfigutil.go | 3 +- translator/translate/otel/common/common.go | 30 +- .../translate/otel/common/common_test.go | 21 +- .../otel/exporter/awscloudwatch/translator.go | 3 - .../awsemf/appsignals_config_eks.yaml | 40 +- .../awsemf/appsignals_config_generic.yaml | 30 +- .../awsemf/appsignals_config_k8s.yaml | 40 +- .../otel/exporter/awsemf/kubernetes.go | 193 +++ .../otel/exporter/awsemf/translator.go | 63 +- .../otel/exporter/awsemf/translator_test.go | 258 +++- .../otel/exporter/awsxray/translator.go | 32 +- .../otel/exporter/awsxray/translator_test.go | 72 +- .../otel_aws_cloudwatch_logs/translator.go | 7 +- .../otel/extension/agenthealth/translator.go | 18 +- .../extension/agenthealth/translator_test.go | 17 +- .../otel/extension/awsproxy/translator.go | 53 + .../extension/awsproxy/translator_test.go | 5 +- .../translator.go | 14 +- .../applicationsignals/translator_test.go | 229 ++++ .../pipeline/appsignals/translator_test.go | 159 --- .../pipeline/containerinsights/translator.go | 9 +- .../containerinsights/translator_test.go | 2 +- .../otel/pipeline/host/translator.go | 19 +- .../otel/pipeline/host/translator_test.go | 6 +- .../translate/otel/pipeline/translator.go | 3 +- .../otel/pipeline/translator_test.go | 3 +- .../otel/pipeline/xray/translator_test.go | 4 +- .../testdata/config_ec2.yaml | 3 + .../testdata/config_eks.yaml | 0 .../testdata/config_generic.yaml | 0 .../testdata/config_k8s.yaml | 0 .../testdata/invalidRulesConfig.json | 6 + .../testdata/validRulesConfig.json | 6 + .../testdata/validRulesConfigEKS.yaml | 6 + .../testdata/validRulesConfigGeneric.yaml | 6 + .../translator.go | 130 +- .../awsapplicationsignals/translator_test.go | 202 +++ .../awsappsignals/translator_test.go | 128 -- .../ec2taggerprocessor/translator.go | 10 +- 
.../ec2taggerprocessor/translator_test.go | 28 + .../otel/processor/gpu/translator.go | 33 + .../metricstransformprocessor/translator.go | 107 +- .../resourcedetection/configs/config.yaml | 3 +- .../resourcedetection/translator_test.go | 2 +- .../otel/receiver/adapter/translator.go | 3 +- .../otel/receiver/adapter/translator_test.go | 3 +- .../otel/receiver/adapter/translators.go | 10 + .../otel/receiver/adapter/translators_test.go | 37 +- .../awscontainerinsight/translator.go | 2 + .../{granularity.go => utils.go} | 4 + .../receiver/awsxray/testdata/config.yaml | 3 +- .../otel/receiver/awsxray/translator.go | 18 + .../otel/receiver/awsxray/translator_test.go | 7 +- .../otel/receiver/otlp/appsignals_config.yaml | 5 - .../otel/receiver/otlp/translator.go | 89 +- .../otel/receiver/otlp/translator_test.go | 101 +- translator/translate/otel/translate_otel.go | 7 +- .../translate/otel/translate_otel_test.go | 105 +- translator/translate/util/tagsutil.go | 20 +- translator/util/ec2util/ec2util.go | 6 +- .../eksdetector}/eksdetector.go | 61 +- .../eksdetector}/eksdetector_test.go | 51 +- .../eksdetector}/eksdetectortestutil.go | 12 +- translator/util/sdkutil.go | 26 + translator/util/sdkutil_test.go | 24 + 302 files changed, 21400 insertions(+), 4184 deletions(-) delete mode 100644 .github/actions/patch-dependencies/action.yml create mode 100644 .github/workflows/application-signals-java-e2e-ec2-asg-test.yml rename .github/workflows/{appsignals-e2e-ec2-test.yml => application-signals-java-e2e-ec2-test.yml} (70%) rename .github/workflows/{appsignals-e2e-eks-test.yml => application-signals-java-e2e-eks-test.yml} (81%) create mode 100644 .github/workflows/application-signals-python-e2e-ec2-asg-test.yml create mode 100644 .github/workflows/application-signals-python-e2e-ec2-test.yml create mode 100644 .github/workflows/application-signals-python-e2e-eks-test.yml create mode 100644 .github/workflows/deploy-canary.yml create mode 100644 .github/workflows/soak-test.yml create 
mode 100644 .github/workflows/start-localstack.yml create mode 100644 .github/workflows/stop-localstack.yml create mode 100644 .github/workflows/test-build-docker.yml create mode 100644 .github/workflows/test-build-packages.yml create mode 100644 amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.Windows create mode 100644 amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.dockerignore create mode 100644 amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows create mode 100644 amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile.Windows create mode 100644 extension/agenthealth/handler/stats/agent/flag.go create mode 100644 extension/agenthealth/handler/stats/agent/flag_test.go create mode 100644 internal/exec.go create mode 100644 internal/exec_unix.go create mode 100644 internal/exec_windows.go create mode 100644 internal/mapstructure/encoder.go create mode 100644 internal/mapstructure/encoder_test.go create mode 100644 internal/mapstructure/marshaler.go create mode 100644 internal/mapstructure/marshaler_test.go create mode 100644 plugins/inputs/nvidia_smi/README.md create mode 100644 plugins/inputs/nvidia_smi/common/setters.go create mode 100644 plugins/inputs/nvidia_smi/nvidia_smi.go create mode 100644 plugins/inputs/nvidia_smi/nvidia_smi_test.go create mode 100644 plugins/inputs/nvidia_smi/sample.conf create mode 100644 plugins/inputs/nvidia_smi/schema_v11/parser.go create mode 100644 plugins/inputs/nvidia_smi/schema_v11/types.go create mode 100644 plugins/inputs/nvidia_smi/schema_v12/parser.go create mode 100644 plugins/inputs/nvidia_smi/schema_v12/types.go create mode 100644 plugins/inputs/nvidia_smi/testdata/a100-sxm4-v12.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/a10g.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml 
create mode 100644 plugins/inputs/nvidia_smi/testdata/quadro-p2000-v12.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/quadro-p400.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/rtx-3080-v12.xml create mode 100644 plugins/inputs/nvidia_smi/testdata/tesla-t4.xml rename plugins/processors/{awsappsignals => awsapplicationsignals}/README.md (92%) create mode 100644 plugins/processors/awsapplicationsignals/common/types.go create mode 100644 plugins/processors/awsapplicationsignals/config/config.go rename plugins/processors/{awsappsignals => awsapplicationsignals}/config/config_test.go (81%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/config/resolvers.go (78%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/config/resolvers_test.go (82%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/factory.go (79%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/factory_test.go (85%) create mode 100644 plugins/processors/awsapplicationsignals/internal/attributes/attributes.go create mode 100644 plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch.go create mode 100644 plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter.go create mode 100644 plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go create mode 100644 plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/prune/metric_pruner.go create mode 100644 plugins/processors/awsapplicationsignals/internal/prune/metric_pruner_test.go create mode 100644 
plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver_test.go rename plugins/processors/{awsappsignals => awsapplicationsignals}/internal/resolver/kubernetes.go (90%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/internal/resolver/kubernetes_test.go (80%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/processor.go (63%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/processor_test.go (83%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/common.go (70%) create mode 100644 plugins/processors/awsapplicationsignals/rules/common_test.go rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/dropper.go (100%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/dropper_test.go (100%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/keeper.go (54%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/keeper_test.go (97%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/replacer.go (59%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/rules/replacer_test.go (75%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/testdata/config_eks.yaml (97%) rename plugins/processors/{awsappsignals => awsapplicationsignals}/testdata/config_generic.yaml (97%) delete mode 100644 plugins/processors/awsappsignals/config/config.go delete mode 100644 plugins/processors/awsappsignals/internal/attributes/attributes.go delete mode 100644 plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go delete mode 100644 plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go delete mode 100644 plugins/processors/awsappsignals/internal/resolver/attributesresolver.go delete mode 100644 
plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go delete mode 100644 plugins/processors/awsappsignals/rules/common_test.go delete mode 100644 plugins/processors/ec2tagger/ebsvolume.go create mode 100644 plugins/processors/ec2tagger/internal/volume/describevolumes.go create mode 100644 plugins/processors/ec2tagger/internal/volume/describevolumes_test.go create mode 100644 plugins/processors/ec2tagger/internal/volume/host_linux.go create mode 100644 plugins/processors/ec2tagger/internal/volume/host_linux_test.go create mode 100644 plugins/processors/ec2tagger/internal/volume/host_nonlinux.go create mode 100644 plugins/processors/ec2tagger/internal/volume/host_nonlinux_test.go create mode 100644 plugins/processors/ec2tagger/internal/volume/merge.go create mode 100644 plugins/processors/ec2tagger/internal/volume/merge_test.go create mode 100644 plugins/processors/ec2tagger/internal/volume/volume.go create mode 100644 plugins/processors/ec2tagger/internal/volume/volume_test.go create mode 100644 plugins/processors/gpuattributes/config.go create mode 100644 plugins/processors/gpuattributes/config_test.go create mode 100644 plugins/processors/gpuattributes/factory.go create mode 100644 plugins/processors/gpuattributes/factory_test.go create mode 100644 plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator.go create mode 100644 plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator_test.go create mode 100644 plugins/processors/gpuattributes/internal/awsneuron_metric_modifier.go create mode 100644 plugins/processors/gpuattributes/internal/awsneuron_metric_modifier_test.go create mode 100644 plugins/processors/gpuattributes/processor.go create mode 100644 plugins/processors/gpuattributes/processor_test.go create mode 100644 tool/clean/clean_ebs/clean_ebs.go create mode 100644 translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.conf create mode 100644 
translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.json create mode 100644 translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml create mode 100644 translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.conf create mode 100644 translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.json create mode 100644 translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.conf create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.json create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml create mode 100644 translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.conf create mode 100644 translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.json create mode 100644 translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml rename translator/translate/otel/pipeline/{appsignals => applicationsignals}/translator.go (87%) create mode 100644 translator/translate/otel/pipeline/applicationsignals/translator_test.go delete mode 100644 translator/translate/otel/pipeline/appsignals/translator_test.go create mode 100644 translator/translate/otel/processor/awsapplicationsignals/testdata/config_ec2.yaml rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/config_eks.yaml (100%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/config_generic.yaml (100%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/config_k8s.yaml (100%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/invalidRulesConfig.json (67%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/validRulesConfig.json (88%) rename 
translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/validRulesConfigEKS.yaml (82%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/testdata/validRulesConfigGeneric.yaml (83%) rename translator/translate/otel/processor/{awsappsignals => awsapplicationsignals}/translator.go (50%) create mode 100644 translator/translate/otel/processor/awsapplicationsignals/translator_test.go delete mode 100644 translator/translate/otel/processor/awsappsignals/translator_test.go create mode 100644 translator/translate/otel/processor/gpu/translator.go rename translator/translate/otel/receiver/awscontainerinsight/{granularity.go => utils.go} (77%) delete mode 100644 translator/translate/otel/receiver/otlp/appsignals_config.yaml rename translator/{translate/otel/common => util/eksdetector}/eksdetector.go (65%) rename translator/{translate/otel/common => util/eksdetector}/eksdetector_test.go (73%) rename translator/{translate/otel/common => util/eksdetector}/eksdetectortestutil.go (77%) diff --git a/.github/actions/patch-dependencies/action.yml b/.github/actions/patch-dependencies/action.yml deleted file mode 100644 index 57d66eccef..0000000000 --- a/.github/actions/patch-dependencies/action.yml +++ /dev/null @@ -1,97 +0,0 @@ -name: "Patch dependencies" -description: | - Patches direct dependencies of this project leveraging maven local to publish the results. - - This workflow supports patching opentelemetry-java and opentelemetry-java-instrumentation repositories by executing - the `patch.sh` script that will try to patch those repositories and after that will optionally test and then publish - the artifacts to maven local. - To add a patch you have to add a file in the `.github/patches/` directory with the name of the repository that must - be patched. - This action assumes that java was set correctly. 
-inputs: - run_tests: - default: "false" - required: false - description: "If the workflow should run tests of the dependencies. Anything different than false will evaluate to true" - -runs: - using: "composite" - steps: - - name: check patches - run: | - if [[ -f .github/patches/opentelemetry-java.patch ]]; then - echo 'patch_otel_java=true' >> $GITHUB_ENV - fi - if [[ -f .github/patches/opentelemetry-java-instrumentation.patch ]]; then - echo 'patch_otel_java_instrumentation=true' >> $GITHUB_ENV - fi - if [[ -f .github/patches/opentelemetry-java-contrib.patch ]]; then - echo 'patch_otel_java_contrib=true' >> $GITHUB_ENV - fi - shell: bash - - - name: Clone and patch repositories - run: .github/scripts/patch.sh - if: ${{ env.patch_otel_java == 'true' || - env.patch_otel_java_instrumentation == 'true' || - env.patch_otel_java_contrib == 'true' }} - shell: bash - - - name: Build opentelemetry-java with tests - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java == 'true' && inputs.run_tests != 'false' }} - with: - arguments: build publishToMavenLocal - build-root-directory: opentelemetry-java - - - name: Build opentelemetry-java - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java == 'true' && inputs.run_tests == 'false' }} - with: - arguments: publishToMavenLocal - build-root-directory: opentelemetry-java - - - name: cleanup opentelemetry-java - run: rm -rf opentelemetry-java - if: ${{ env.patch_otel_java == 'true' }} - shell: bash - - - name: Build opentelemetry-java-contrib with tests - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java_contrib == 'true' && inputs.run_tests != 'false' }} - with: - arguments: build publishToMavenLocal - build-root-directory: opentelemetry-java-contrib - - - name: Build opentelemetry-java-contrib - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java_contrib == 'true' && inputs.run_tests == 'false' }} - with: - arguments: publishToMavenLocal - build-root-directory: 
opentelemetry-java-contrib - - - name: cleanup opentelemetry-java-contrib - run: rm -rf opentelemetry-java-contrib - if: ${{ env.patch_otel_java_contrib == 'true' }} - shell: bash - - - name: Build opentelemetry-java-instrumentation with tests - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java_instrumentation == 'true' && inputs.run_tests != 'false' }} - with: - arguments: check -x spotlessCheck publishToMavenLocal - build-root-directory: opentelemetry-java-instrumentation - cache-read-only: false - - - name: Build opentelemetry java instrumentation - uses: gradle/gradle-build-action@v2 - if: ${{ env.patch_otel_java_instrumentation == 'true' && inputs.run_tests == 'false' }} - with: - arguments: publishToMavenLocal - build-root-directory: opentelemetry-java-instrumentation - cache-read-only: false - - - name: cleanup opentelmetry-java-instrumentation - run: rm -rf opentelemetry-java-instrumentation - if: ${{ env.patch_otel_java_instrumentation == 'true' }} - shell: bash \ No newline at end of file diff --git a/.github/workflows/PR-build.yml b/.github/workflows/PR-build.yml index 8d178b2a4f..a8d2fb7af2 100644 --- a/.github/workflows/PR-build.yml +++ b/.github/workflows/PR-build.yml @@ -47,7 +47,7 @@ jobs: if: needs.changes.outputs.lint == 'true' uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 cache: false - name: Check out code @@ -102,7 +102,7 @@ jobs: if: needs.changes.outputs.build == 'true' uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 cache: false - name: Check out code diff --git a/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml b/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml new file mode 100644 index 0000000000..af25972507 --- /dev/null +++ b/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml @@ -0,0 +1,168 @@ +## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+## SPDX-License-Identifier: Apache-2.0 + +# This is a reusable workflow for running the E2E test for App Signals. +# It is meant to be called from another workflow. +# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview +name: App Signals Enablement E2E Testing - EC2 ASG Use Case +on: + workflow_call: + +permissions: + id-token: write + contents: read + +env: + # The presence of this env var is required for use by terraform and AWS CLI commands + # It is not redundant + AWS_DEFAULT_REGION: us-east-1 + APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + SAMPLE_APP_FRONTEND_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/main-service.jar" + SAMPLE_APP_REMOTE_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/remote-service.jar" + GET_ADOT_JAR_COMMAND: "aws s3 cp s3://adot-main-build-staging-jar/aws-opentelemetry-agent.jar ./adot.jar" + GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data + +jobs: + e2e-ec2-single-asg-test: + runs-on: ubuntu-latest + steps: + - name: Get testing resources from aws-application-signals-test-framework + uses: actions/checkout@v4 + with: + repository: aws-observability/aws-application-signals-test-framework + ref: add-ec2-platform-support + + - name: Generate testing id + run: echo TESTING_ID="java-asg-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + - name: Set up terraform + uses: hashicorp/setup-terraform@v3 + with: + 
terraform_wrapper: false + + - name: Deploy sample app via terraform + working-directory: terraform/ec2/asg + run: | + terraform init + terraform validate + terraform apply -auto-approve \ + -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ + -var="test_id=${{ env.TESTING_ID }}" \ + -var="sample_app_jar=${{ env.SAMPLE_APP_FRONTEND_SERVICE_JAR }}" \ + -var="sample_remote_app_jar=${{ env.SAMPLE_APP_REMOTE_SERVICE_JAR }}" \ + -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ + -var="get_adot_jar_command=${{ env.GET_ADOT_JAR_COMMAND }}" + + - name: Get sample app and EC2 instance information + working-directory: terraform/ec2/asg + run: | + main_service_instance_id=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names ec2-single-asg-${{ env.TESTING_ID }} --region ${{ env.AWS_DEFAULT_REGION }} --query "AutoScalingGroups[].Instances[0].InstanceId" --output text) + main_service_public_ip=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PublicIpAddress" --output text) + main_service_private_dns_name=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PrivateDnsName" --output text) + echo "INSTANCE_ID=$main_service_instance_id" >> $GITHUB_ENV + echo "MAIN_SERVICE_ENDPOINT=$main_service_public_ip:8080" >> $GITHUB_ENV + echo "PRIVATE_DNS_NAME=$main_service_private_dns_name" >> $GITHUB_ENV + echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV + echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV + + - name: Wait for app endpoint to come online + id: endpoint-check + run: | + attempt_counter=0 + max_attempts=30 + until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do + if [ ${attempt_counter} -eq ${max_attempts} ];then + echo "Max attempts 
reached" + exit 1 + fi + + printf '.' + attempt_counter=$(($attempt_counter+1)) + sleep 10 + done + + # This steps increases the speed of the validation by creating the telemetry data in advance + - name: Call all test APIs + continue-on-error: true + run: | + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call" + + # Validation for pulse telemetry data + - name: Validate generated EMF logs + id: log-validation + run: ./gradlew validator:run --args='-c java/ec2/asg/log-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name sample-application-${{ env.TESTING_ID }} + --remote-service-name sample-remote-application-${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + - name: Validate generated metrics + id: metric-validation + if: (success() || steps.log-validation-1.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c java/ec2/asg/metric-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 + --region ${{ 
env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name sample-application-${{ env.TESTING_ID }} + --remote-service-name sample-remote-application-${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + - name: Validate generated traces + id: trace-validation + if: (success() || steps.log-validation-1.outcome == 'failure' || steps.metric-validation-1.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c java/ec2/asg/trace-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name sample-application-${{ env.TESTING_ID }} + --remote-service-name sample-remote-application-${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + # Clean up Procedures + - name: Terraform destroy + if: always() + continue-on-error: true + working-directory: terraform/ec2/asg + run: | + terraform destroy -auto-approve \ + -var="test_id=${{ env.TESTING_ID }}" diff --git a/.github/workflows/appsignals-e2e-ec2-test.yml b/.github/workflows/application-signals-java-e2e-ec2-test.yml similarity index 70% rename from 
.github/workflows/appsignals-e2e-ec2-test.yml rename to .github/workflows/application-signals-java-e2e-ec2-test.yml index 84456cea5e..79052fae38 100644 --- a/.github/workflows/appsignals-e2e-ec2-test.yml +++ b/.github/workflows/application-signals-java-e2e-ec2-test.yml @@ -16,38 +16,22 @@ permissions: env: AWS_DEFAULT_REGION: us-east-1 APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - SAMPLE_APP_FRONTEND_SERVICE_JAR: "s3://aws-appsignals-sample-app/main-service.jar" - SAMPLE_APP_REMOTE_SERVICE_JAR: "s3://aws-appsignals-sample-app/remote-service.jar" - GET_ADOT_JAR_COMMAND: "wget -O adot.jar https://github.com/aws-observability/aws-otel-java-instrumentation/releases/latest/download/aws-opentelemetry-agent.jar" + SAMPLE_APP_FRONTEND_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/main-service.jar" + SAMPLE_APP_REMOTE_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/remote-service.jar" + GET_ADOT_JAR_COMMAND: "aws s3 cp s3://adot-main-build-staging-jar/aws-opentelemetry-agent.jar ./adot.jar" GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" - METRIC_NAMESPACE: AppSignals - LOG_GROUP_NAME: /aws/appsignals/generic + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data jobs: e2e-ec2-test: runs-on: ubuntu-latest steps: - - name: Get testing resources from ADOT + - name: Get testing resources from aws-application-signals-test-framework uses: actions/checkout@v4 with: - repository: aws-observability/aws-otel-java-instrumentation - ref: main - - - name: Download patch action script - uses: actions/checkout@v4 - with: - path: patch-dependencies - sparse-checkout: | - .github/actions/patch-dependencies/action.yml - - - name: Replace patch dependency action.yml - run: | - cp -f ./patch-dependencies/.github/actions/patch-dependencies/action.yml 
./.github/actions/patch-dependencies/action.yml - - - uses: actions/setup-java@v4 - with: - java-version: 17 - distribution: temurin + repository: aws-observability/aws-application-signals-test-framework + ref: ga-release - name: Generate testing id run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV @@ -55,7 +39,7 @@ jobs: - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v4 with: - role-to-assume: ${{ secrets.APP_SIGNALS_E2E_IAM_ROLE }} + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} aws-region: ${{ env.AWS_DEFAULT_REGION }} - name: Set up terraform @@ -64,7 +48,7 @@ jobs: terraform_wrapper: false - name: Deploy sample app via terraform - working-directory: testing/terraform/ec2 + working-directory: terraform/ec2 run: | terraform init terraform validate @@ -77,17 +61,17 @@ jobs: -var="get_adot_jar_command=${{ env.GET_ADOT_JAR_COMMAND }}" - name: Get the ec2 instance ami id - working-directory: testing/terraform/ec2 + working-directory: terraform/ec2 run: | echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV - name: Get the sample app endpoint - working-directory: testing/terraform/ec2 + working-directory: terraform/ec2 run: | echo "MAIN_SERVICE_ENDPOINT=$(terraform output sample_app_main_service_public_dns):8080" >> $GITHUB_ENV echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV - - + echo "MAIN_SERVICE_INSTANCE_ID=$(terraform output main_service_instance_id)" >> $GITHUB_ENV + - name: Wait for app endpoint to come online id: endpoint-check run: | @@ -108,28 +92,19 @@ jobs: - name: Call all test APIs continue-on-error: true run: | - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call/ - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call/ - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT 
}}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}/ - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call/ - - # cache local patch outputs - - name: Cache local Maven repository - id: cache-local-maven-repo - uses: actions/cache@v3 - with: - path: | - ~/.m2/repository/io/opentelemetry/ - key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call" - - name: Publish patched dependencies to maven local - uses: ./.github/actions/patch-dependencies - if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + - name: Build Gradle + working-directory: ${{ env.TEST_RESOURCES_FOLDER }} + run: ./gradlew # Validation for pulse telemetry data - name: Validate generated EMF logs id: log-validation - run: ./gradlew testing:validator:run --args='-c ec2/log-validation.yml + run: ./gradlew validator:run --args='-c ec2/log-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 @@ -139,14 +114,15 @@ jobs: --log-group ${{ env.LOG_GROUP_NAME }} --service-name sample-application-${{ env.TESTING_ID }} --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --request-body ip=${{ env.REMOTE_SERVICE_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} --rollup' - name: Validate generated metrics id: metric-validation if: (success() || steps.log-validation.outcome == 'failure') 
&& !cancelled() - run: ./gradlew testing:validator:run --args='-c ec2/metric-validation.yml + run: ./gradlew validator:run --args='-c ec2/metric-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 @@ -156,14 +132,15 @@ jobs: --log-group ${{ env.LOG_GROUP_NAME }} --service-name sample-application-${{ env.TESTING_ID }} --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --request-body ip=${{ env.REMOTE_SERVICE_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} --rollup' - name: Validate generated traces id: trace-validation if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() - run: ./gradlew testing:validator:run --args='-c ec2/trace-validation.yml + run: ./gradlew validator:run --args='-c ec2/trace-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 @@ -173,17 +150,16 @@ jobs: --log-group ${{ env.LOG_GROUP_NAME }} --service-name sample-application-${{ env.TESTING_ID }} --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --request-body ip=${{ env.REMOTE_SERVICE_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} --rollup' # Clean up Procedures - - name: Terraform destroy if: always() continue-on-error: true - working-directory: testing/terraform/ec2 + working-directory: terraform/ec2 run: | terraform destroy -auto-approve \ -var="test_id=${{ env.TESTING_ID }}" - diff --git a/.github/workflows/appsignals-e2e-eks-test.yml 
b/.github/workflows/application-signals-java-e2e-eks-test.yml similarity index 81% rename from .github/workflows/appsignals-e2e-eks-test.yml rename to .github/workflows/application-signals-java-e2e-eks-test.yml index 327b37aba8..97fe125a5c 100644 --- a/.github/workflows/appsignals-e2e-eks-test.yml +++ b/.github/workflows/application-signals-java-e2e-eks-test.yml @@ -24,30 +24,20 @@ env: SAMPLE_APP_NAMESPACE: sample-app-namespace SAMPLE_APP_FRONTEND_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_FRONTEND_SVC_IMG }} SAMPLE_APP_REMOTE_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_REMOTE_SVC_IMG }} - METRIC_NAMESPACE: AppSignals - LOG_GROUP_NAME: /aws/appsignals/eks + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data ECR_INTEGRATION_TEST_REPO: "cwagent-integration-test" + APPLICATION_SIGNALS_ADOT_IMAGE: 611364707713.dkr.ecr.us-west-2.amazonaws.com/adot-autoinstrumentation-java-operator-staging:1.33.0-SNAPSHOT-91cbba8 jobs: appsignals-e2e-test: runs-on: ubuntu-latest steps: - - name: Get testing resources from ADOT + - name: Get testing resources from aws-application-signals-test-framework uses: actions/checkout@v4 with: - repository: aws-observability/aws-otel-java-instrumentation - ref: main - - - name: Download patch action script - uses: actions/checkout@v4 - with: - path: patch-dependencies - sparse-checkout: | - .github/actions/patch-dependencies/action.yml - - - name: Replace patch dependency action.yml - run: | - cp -f ./patch-dependencies/.github/actions/patch-dependencies/action.yml ./.github/actions/patch-dependencies/action.yml + repository: aws-observability/aws-application-signals-test-framework + ref: ga-release - name: Download enablement script uses: actions/checkout@v4 @@ -60,10 +50,10 @@ jobs: scripts/eks/appsignals/clean-app-signals.sh sparse-checkout-cone-mode: false - - uses: actions/setup-java@v4 - with: - java-version: 17 - distribution: temurin +# TODO: If there are any new changes to the staging 
image that will require updating the testing resources, first +# make the changes here using sed commands and submit a PR in the aws-application-signals-test-framework +# - name: Update validation template with new changes +# run: | - name: Generate testing id run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV @@ -71,7 +61,7 @@ jobs: - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v4 with: - role-to-assume: ${{ secrets.APP_SIGNALS_E2E_IAM_ROLE }} + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} aws-region: ${{ env.AWS_DEFAULT_REGION }} # local directory to store the kubernetes config @@ -109,7 +99,7 @@ jobs: terraform_wrapper: false - name: Deploy sample app via terraform - working-directory: testing/terraform/eks + working-directory: terraform/eks run: | terraform init terraform validate @@ -142,6 +132,15 @@ jobs: run: | kubectl patch amazoncloudwatchagents -n amazon-cloudwatch cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": ${{ secrets.AWS_ECR_PRIVATE_REGISTRY }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ github.sha }}}]' kubectl delete pods --all -n amazon-cloudwatch + sleep 10 + kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch + + - name: Patch the ADOT image and restart CloudWatch pods + run: | + kubectl patch deploy -namazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' \ + -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/1", "value": "--auto-instrumentation-java-image=${{ env.APPLICATION_SIGNALS_ADOT_IMAGE }}"}]' + kubectl delete pods --all -n amazon-cloudwatch + sleep 10 kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch # Application pods need to be restarted for the @@ -181,17 +180,17 @@ jobs: echo "NEW_CW_AGENT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l 
app.kubernetes.io/name=cloudwatch-agent -o json | \ jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV - - name: Check if CW Agent image has changed - run: | - if [ ${{ env.OLD_CW_AGENT_IMAGE }} = ${{ env.NEW_CW_AGENT_IMAGE }} ]; then - echo "Operator image did not change" - exit 1 - fi + # - name: Check if CW Agent image has changed + # run: | + # if [ ${{ env.OLD_CW_AGENT_IMAGE }} = ${{ env.NEW_CW_AGENT_IMAGE }} ]; then + # echo "Operator image did not change" + # exit 1 + # fi - name: Get the sample app endpoint run: | echo "APP_ENDPOINT=$(terraform output sample_app_endpoint)" >> $GITHUB_ENV - working-directory: testing/terraform/eks + working-directory: terraform/eks - name: Wait for app endpoint to come online id: endpoint-check @@ -213,29 +212,20 @@ jobs: - name: Call all test APIs continue-on-error: true run: | - curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/outgoing-http-call/ - curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/aws-sdk-call/ - curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_POD_IP }}/ - curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/client-call/ - - # cache local patch outputs - - name: Cache local Maven repository - id: cache-local-maven-repo - uses: actions/cache@v3 - with: - path: | - ~/.m2/repository/io/opentelemetry/ - key: ${{ runner.os }}-maven-local-${{ hashFiles('.github/patches/opentelemetry-java*.patch') }} - - - name: Publish patched dependencies to maven local - uses: ./.github/actions/patch-dependencies - if: steps.cache-local-maven-repo.outputs.cache-hit != 'true' + curl -S -s "http://${{ env.APP_ENDPOINT }}/outgoing-http-call" + curl -S -s "http://${{ env.APP_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.APP_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.APP_ENDPOINT }}/client-call" + + - 
name: Build Gradle + working-directory: ${{ env.TEST_RESOURCES_FOLDER }} + run: ./gradlew # Validation for app signals telemetry data - name: Call endpoint and validate generated EMF logs id: log-validation if: steps.endpoint-check.outcome == 'success' && !cancelled() - run: ./gradlew testing:validator:run --args='-c eks/log-validation.yml + run: ./gradlew validator:run --args='-c eks/log-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.APP_ENDPOINT }} --region ${{ env.AWS_DEFAULT_REGION }} @@ -246,13 +236,13 @@ jobs: --platform-info ${{ inputs.test-cluster-name }} --service-name sample-application-${{ env.TESTING_ID }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --request-body ip=${{ env.REMOTE_SERVICE_POD_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} --rollup' - name: Call endpoints and validate generated metrics id: metric-validation if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() - run: ./gradlew testing:validator:run --args='-c eks/metric-validation.yml + run: ./gradlew validator:run --args='-c eks/metric-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.APP_ENDPOINT }} --region ${{ env.AWS_DEFAULT_REGION }} @@ -264,13 +254,13 @@ jobs: --service-name sample-application-${{ env.TESTING_ID }} --remote-service-name sample-remote-application-${{ env.TESTING_ID }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --request-body ip=${{ env.REMOTE_SERVICE_POD_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} --rollup' - name: Call endpoints and validate generated traces id: trace-validation if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() - run: ./gradlew testing:validator:run --args='-c eks/trace-validation.yml + run: ./gradlew validator:run --args='-c 
eks/trace-validation.yml --testing-id ${{ env.TESTING_ID }} --endpoint http://${{ env.APP_ENDPOINT }} --region ${{ env.AWS_DEFAULT_REGION }} @@ -281,7 +271,7 @@ jobs: --platform-info ${{ inputs.test-cluster-name }} --service-name sample-application-${{ env.TESTING_ID }} --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --request-body ip=${{ env.REMOTE_SERVICE_POD_IP }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} --rollup' # Clean up Procedures @@ -313,7 +303,7 @@ jobs: - name: Terraform destroy if: always() continue-on-error: true - working-directory: testing/terraform/eks + working-directory: terraform/eks run: | terraform destroy -auto-approve \ -var="test_id=${{ env.TESTING_ID }}" \ @@ -332,4 +322,4 @@ jobs: --name service-account-${{ env.TESTING_ID }} \ --namespace ${{ env.SAMPLE_APP_NAMESPACE }} \ --cluster ${{ inputs.test-cluster-name }} \ - --region ${{ env.AWS_DEFAULT_REGION }} \ No newline at end of file + --region ${{ env.AWS_DEFAULT_REGION }} diff --git a/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml b/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml new file mode 100644 index 0000000000..553e792e10 --- /dev/null +++ b/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml @@ -0,0 +1,167 @@ +## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +## SPDX-License-Identifier: Apache-2.0 + +# This is a reusable workflow for running the Python E2E Canary test for Application Signals. +# It is meant to be called from another workflow. 
+# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview +name: Application Signals Enablement E2E Testing - Python EC2 Asg Use Case +on: + workflow_call: + +permissions: + id-token: write + contents: read + +env: + AWS_DEFAULT_REGION: us-east-1 + APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/python-sample-app.zip + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data + ADOT_WHEEL_NAME: ${{ inputs.staging_wheel_name }} + TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE} + GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" + GET_ADOT_WHEEL_COMMAND: "aws s3 cp s3://metric-schema-changes/aws_opentelemetry_distro-0.2.0-py3-none-any.whl ./aws_opentelemetry_distro-0.2.0-py3-none-any.whl && python3.9 -m pip install aws_opentelemetry_distro-0.2.0-py3-none-any.whl" + +jobs: + python-e2e-ec2-asg-test: + runs-on: ubuntu-latest + steps: + - name: Get testing resources from aws-application-signals-test-framework + uses: actions/checkout@v4 + with: + repository: aws-observability/aws-application-signals-test-framework + ref: add-ec2-platform-for-python-ga + + - name: Generate testing id + run: echo TESTING_ID="python-asg-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + - name: Set up terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + + - name: Deploy sample app via terraform + working-directory: terraform/python/ec2/asg + run: | + terraform init + 
terraform validate + terraform apply -auto-approve \ + -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ + -var="test_id=${{ env.TESTING_ID }}" \ + -var="sample_app_zip=${{ env.SAMPLE_APP_ZIP }}" \ + -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ + -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}" + + - name: Get sample app and EC2 instance information + working-directory: terraform/python/ec2/asg + run: | + main_service_instance_id=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names python-ec2-single-asg-${{ env.TESTING_ID }} --region ${{ env.AWS_DEFAULT_REGION }} --query "AutoScalingGroups[].Instances[0].InstanceId" --output text) + main_service_public_ip=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PublicIpAddress" --output text) + main_service_private_dns_name=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PrivateDnsName" --output text) + echo "INSTANCE_ID=$main_service_instance_id" >> $GITHUB_ENV + echo "MAIN_SERVICE_ENDPOINT=$main_service_public_ip:8000" >> $GITHUB_ENV + echo "PRIVATE_DNS_NAME=$main_service_private_dns_name" >> $GITHUB_ENV + echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV + echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV + + - name: Wait for app endpoint to come online + id: endpoint-check + run: | + attempt_counter=0 + max_attempts=30 + until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do + if [ ${attempt_counter} -eq ${max_attempts} ];then + echo "Max attempts reached" + exit 1 + fi + + printf '.' 
+ attempt_counter=$(($attempt_counter+1)) + sleep 10 + done + + # This steps increases the speed of the validation by creating the telemetry data in advance + - name: Call all test APIs + continue-on-error: true + run: | + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" + curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call" + + - name: Build Gradle + run: ./gradlew + + # Validation for pulse telemetry data + - name: Validate generated EMF logs + id: log-validation + run: ./gradlew validator:run --args='-c python/ec2/asg/log-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --rollup' + + - name: Validate generated metrics + id: metric-validation + if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/ec2/asg/metric-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + 
--metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --rollup' + + - name: Validate generated traces + id: trace-validation + if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/ec2/asg/trace-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} + --instance-id ${{ env.INSTANCE_ID }} + --private-dns-name ${{ env.PRIVATE_DNS_NAME }} + --rollup' + + # Clean up Procedures + - name: Terraform destroy + if: always() + continue-on-error: true + working-directory: terraform/python/ec2/asg + run: | + terraform destroy -auto-approve \ + -var="test_id=${{ env.TESTING_ID }}" \ No newline at end of file diff --git a/.github/workflows/application-signals-python-e2e-ec2-test.yml b/.github/workflows/application-signals-python-e2e-ec2-test.yml new file mode 100644 index 
0000000000..470bb492ac --- /dev/null +++ b/.github/workflows/application-signals-python-e2e-ec2-test.yml @@ -0,0 +1,160 @@ +## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +## SPDX-License-Identifier: Apache-2.0 + +# This is a reusable workflow for running the Python E2E Canary test for Application Signals. +# It is meant to be called from another workflow. +# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview +name: Application Signals Enablement E2E Testing - Python EC2 Use Case +on: + workflow_call: + +permissions: + id-token: write + contents: read + +env: + AWS_DEFAULT_REGION: us-east-1 + APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/python-sample-app.zip + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data + TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE} + GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" + GET_ADOT_WHEEL_COMMAND: "aws s3 cp s3://metric-schema-changes/aws_opentelemetry_distro-0.2.0-py3-none-any.whl ./aws_opentelemetry_distro-0.2.0-py3-none-any.whl && python3.9 -m pip install aws_opentelemetry_distro-0.2.0-py3-none-any.whl" + +jobs: + python-e2e-ec2-test: + runs-on: ubuntu-latest + steps: + - name: Get testing resources from aws-application-signals-test-framework + uses: actions/checkout@v4 + with: + repository: aws-observability/aws-application-signals-test-framework + ref: ga-python + + - name: Generate testing id + run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ 
secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + - name: Set up terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + + - name: Deploy sample app via terraform + working-directory: terraform/python/ec2 + run: | + terraform init + terraform validate + terraform apply -auto-approve \ + -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ + -var="test_id=${{ env.TESTING_ID }}" \ + -var="sample_app_zip=${{ env.SAMPLE_APP_ZIP }}" \ + -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ + -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}" + + - name: Get the ec2 instance ami id + working-directory: terraform/python/ec2 + run: | + echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV + + - name: Get the sample app endpoint + working-directory: terraform/python/ec2 + run: | + echo "MAIN_SERVICE_ENDPOINT=$(terraform output sample_app_main_service_public_dns):8000" >> $GITHUB_ENV + echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV + echo "MAIN_SERVICE_INSTANCE_ID=$(terraform output main_service_instance_id)" >> $GITHUB_ENV + + - name: Wait for app endpoint to come online + id: endpoint-check + run: | + attempt_counter=0 + max_attempts=30 + until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do + if [ ${attempt_counter} -eq ${max_attempts} ];then + echo "Max attempts reached" + exit 1 + fi + + printf '.' 
+ attempt_counter=$(($attempt_counter+1)) + sleep 10 + done + + # This steps increases the speed of the validation by creating the telemetry data in advance + - name: Call all test APIs + continue-on-error: true + run: | + curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call/; echo + curl -S -s -o /dev/null "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}"; echo + curl -S -s -o /dev/null "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}"; echo + curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call/; echo + + - name: Build Gradle + run: ./gradlew + + # Validation for pulse telemetry data + - name: Validate generated EMF logs + id: log-validation + run: ./gradlew validator:run --args='-c python/ec2/log-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} + --rollup' + + - name: Validate generated metrics + id: metric-validation + if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/ec2/metric-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + --metric-namespace ${{ 
env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} + --rollup' + + - name: Validate generated traces + id: trace-validation + if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/ec2/trace-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --service-name python-sample-application-${{ env.TESTING_ID }} + --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} + --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} + --instance-ami ${{ env.EC2_INSTANCE_AMI }} + --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} + --rollup' + + # Clean up Procedures + - name: Terraform destroy + if: always() + continue-on-error: true + working-directory: terraform/python/ec2 + run: | + terraform destroy -auto-approve \ + -var="test_id=${{ env.TESTING_ID }}" \ No newline at end of file diff --git a/.github/workflows/application-signals-python-e2e-eks-test.yml b/.github/workflows/application-signals-python-e2e-eks-test.yml new file mode 100644 index 0000000000..f33e12a553 --- /dev/null +++ b/.github/workflows/application-signals-python-e2e-eks-test.yml @@ -0,0 +1,317 @@ +## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+## SPDX-License-Identifier: Apache-2.0 + +# This is a reusable workflow for running the E2E test for Application Signals. +# It is meant to be called from another workflow. +# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview +name: Application Signals Enablement E2E Testing - Python EKS +on: + workflow_call: + inputs: + test-cluster-name: + required: true + type: string + +permissions: + id-token: write + contents: read + +env: + AWS_DEFAULT_REGION: us-east-1 + APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + SAMPLE_APP_NAMESPACE: sample-app-namespace + SAMPLE_APP_FRONTEND_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_FRONTEND_SVC_IMG }} + SAMPLE_APP_REMOTE_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_REMOTE_SVC_IMG }} + METRIC_NAMESPACE: ApplicationSignals + LOG_GROUP_NAME: /aws/application-signals/data + ECR_INTEGRATION_TEST_REPO: "cwagent-integration-test" + APPLICATION_SIGNALS_ADOT_IMAGE: 637423224110.dkr.ecr.us-east-1.amazonaws.com/aws-observability/adot-autoinstrumentation-python-staging:0.2.0-408d938 + +jobs: + python-e2e-eks-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + repository: aws-observability/aws-application-signals-test-framework + ref: ga-python + + - name: Download enablement script + uses: actions/checkout@v4 + with: + repository: aws-observability/application-signals-demo + ref: main + path: enablement-script + sparse-checkout: | + scripts/eks/appsignals/enable-app-signals.sh + scripts/eks/appsignals/clean-app-signals.sh + sparse-checkout-cone-mode: false + + - name: Generate testing id + run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ 
secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} + aws-region: ${{ env.AWS_DEFAULT_REGION }} + + # local directory to store the kubernetes config + - name: Create kubeconfig directory + run: mkdir -p ${{ github.workspace }}/.kube + + - name: Set KUBECONFIG environment variable + run: echo KUBECONFIG="${{ github.workspace }}/.kube/config" >> $GITHUB_ENV + + - name: Set up kubeconfig + run: aws eks update-kubeconfig --name ${{ inputs.test-cluster-name }} --region ${{ env.AWS_DEFAULT_REGION }} + + - name: Install eksctl + run: | + mkdir ${{ github.workspace }}/eksctl + curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz" + tar -xzf eksctl_Linux_amd64.tar.gz -C ${{ github.workspace }}/eksctl && rm eksctl_Linux_amd64.tar.gz + echo "${{ github.workspace }}/eksctl" >> $GITHUB_PATH + + - name: Create role for AWS access from the sample app + id: create_service_account + run: | + eksctl create iamserviceaccount \ + --name service-account-${{ env.TESTING_ID }} \ + --namespace ${{ env.SAMPLE_APP_NAMESPACE }} \ + --cluster ${{ inputs.test-cluster-name }} \ + --role-name eks-s3-access-${{ env.TESTING_ID }} \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \ + --region ${{ env.AWS_DEFAULT_REGION }} \ + --approve + + - name: Set up terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + + - name: Deploy sample app via terraform + working-directory: terraform/python/eks + run: | + terraform init + terraform validate + terraform apply -auto-approve \ + -var="test_id=${{ env.TESTING_ID }}" \ + -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ + -var="kube_directory_path=${{ github.workspace }}/.kube" \ + -var="eks_cluster_name=${{ inputs.test-cluster-name }}" \ + -var="eks_cluster_context_name=$(kubectl config current-context)" \ + -var="test_namespace=${{ env.SAMPLE_APP_NAMESPACE }}" \ + -var="service_account_aws_access=service-account-${{ env.TESTING_ID }}" \ + 
-var="python_app_image=654654176582.dkr.ecr.us-east-1.amazonaws.com/appsignals-python-django-main-service" \ + -var="python_remote_app_image=654654176582.dkr.ecr.us-east-1.amazonaws.com/appsignals-python-django-remote-service" + + # Enable App Signals on the test cluster + - name: Enable App Signals + working-directory: enablement-script/scripts/eks/appsignals + run: | + ./enable-app-signals.sh \ + ${{ inputs.test-cluster-name }} \ + ${{ env.AWS_DEFAULT_REGION }} \ + ${{ env.SAMPLE_APP_NAMESPACE }} + + - name: Save CloudWatch image to environment before patching + run: | + echo "OLD_CW_AGENT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ + jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV + + - name: Patch the CloudWatch Agent image and restart CloudWatch pods + run: | + kubectl patch amazoncloudwatchagents -n amazon-cloudwatch cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": ${{ secrets.AWS_ECR_PRIVATE_REGISTRY }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ github.sha }}}]' + kubectl delete pods --all -n amazon-cloudwatch + sleep 10 + kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch + + - name: Patch the ADOT image and restart CloudWatch pods + run: | + kubectl patch deploy -namazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' \ + -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/2", "value": "--auto-instrumentation-python-image=${{ env.APPLICATION_SIGNALS_ADOT_IMAGE }}"}]' + kubectl delete pods --all -n amazon-cloudwatch + sleep 10 + kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch + + # Application pods need to be restarted for the + # app signals instrumentation to take effect + - name: Restart the app pods + run: kubectl delete pods --all -n ${{ env.SAMPLE_APP_NAMESPACE }} + + - name: Wait for sample app pods to come up + run: | + kubectl wait --for=condition=Ready pod --all 
-n ${{ env.SAMPLE_APP_NAMESPACE }} \ + + - name: Get remote service deployment name and IP + run: | + echo "REMOTE_SERVICE_DEPLOYMENT_NAME=$(kubectl get deployments -n ${{ env.SAMPLE_APP_NAMESPACE }} --selector=app=remote-app -o jsonpath='{.items[0].metadata.name}')" >> $GITHUB_ENV + echo "REMOTE_SERVICE_POD_IP=$(kubectl get pods -n ${{ env.SAMPLE_APP_NAMESPACE }} --selector=app=remote-app -o jsonpath='{.items[0].status.podIP}')" >> $GITHUB_ENV + + - name: Log pod ADOT image ID + run: | + kubectl get pods -n ${{ env.SAMPLE_APP_NAMESPACE }} --output json | \ + jq '.items[0].status.initContainerStatuses[0].imageID' + + - name: Log pod CWAgent Operator image ID + run: | + kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=amazon-cloudwatch-observability -o json | \ + jq '.items[0].status.containerStatuses[0].imageID' + + - name: Log pod FluentBit image ID + run: | + kubectl get pods -n amazon-cloudwatch -l k8s-app=fluent-bit -o json | \ + jq '.items[0].status.containerStatuses[0].imageID' + + - name: Log pod CWAgent image ID and save image to the environment + run: | + kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ + jq '.items[0].status.containerStatuses[0].imageID' + + echo "NEW_CW_AGENT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ + jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV + +# - name: Check if CW Agent image has changed +# run: | +# if [ ${{ env.OLD_CW_AGENT_IMAGE }} = ${{ env.NEW_CW_AGENT_IMAGE }} ]; then +# echo "Operator image did not change" +# exit 1 +# fi + + - name: Get the sample app endpoint + run: | + echo "APP_ENDPOINT=$(terraform output python_app_endpoint)" >> $GITHUB_ENV + working-directory: terraform/python/eks + + - name: Wait for app endpoint to come online + id: endpoint-check + run: | + attempt_counter=0 + max_attempts=30 + until $(curl --output /dev/null --silent --head --fail http://${{ env.APP_ENDPOINT 
}}); do + if [ ${attempt_counter} -eq ${max_attempts} ];then + echo "Max attempts reached" + exit 1 + fi + + printf '.' + attempt_counter=$(($attempt_counter+1)) + sleep 10 + done + + # This steps increases the speed of the validation by creating the telemetry data in advance + - name: Call all test APIs + continue-on-error: true + run: | + curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/outgoing-http-call"; echo + curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}"; echo + curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}"; echo + curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/client-call"; echo + + - name: Build Gradle + run: ./gradlew + + # Validation for application signals telemetry data + - name: Call endpoint and validate generated EMF logs + id: log-validation + if: steps.endpoint-check.outcome == 'success' && !cancelled() + run: ./gradlew validator:run --args='-c python/eks/log-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.APP_ENDPOINT }} + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} + --platform-info ${{ inputs.test-cluster-name }} + --service-name python-application-${{ env.TESTING_ID }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + - name: Call endpoints and validate generated metrics + id: metric-validation + if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/eks/metric-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint 
http://${{ env.APP_ENDPOINT }} + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --metric-namespace ${{ env.METRIC_NAMESPACE }} + --log-group ${{ env.LOG_GROUP_NAME }} + --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} + --platform-info ${{ inputs.test-cluster-name }} + --service-name python-application-${{ env.TESTING_ID }} + --remote-service-name python-remote-application-${{ env.TESTING_ID }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + - name: Call endpoints and validate generated traces + id: trace-validation + if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() + run: ./gradlew validator:run --args='-c python/eks/trace-validation.yml + --testing-id ${{ env.TESTING_ID }} + --endpoint http://${{ env.APP_ENDPOINT }} + --region ${{ env.AWS_DEFAULT_REGION }} + --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} + --log-group ${{ env.LOG_GROUP_NAME }} + --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} + --platform-info ${{ inputs.test-cluster-name }} + --service-name python-application-${{ env.TESTING_ID }} + --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} + --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} + --rollup' + + # Clean up Procedures + + - name: Remove log group deletion command + if: always() + working-directory: enablement-script/scripts/eks/appsignals + run: | + delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP_NAME }}' --region \$REGION" + sed -i "s#$delete_log_group##g" clean-app-signals.sh + + - name: Clean Up Application Signals + if: always() + continue-on-error: true + working-directory: enablement-script + run: | + ./clean-app-signals.sh \ + ${{ inputs.test-cluster-name }} \ + ${{ inputs.aws-region }} 
\ + ${{ env.SAMPLE_APP_NAMESPACE }} + + # This step also deletes lingering resources from previous test runs + - name: Delete all sample app resources + if: always() + continue-on-error: true + timeout-minutes: 10 + run: kubectl delete namespace ${{ env.SAMPLE_APP_NAMESPACE }} + + - name: Terraform destroy + if: always() + continue-on-error: true + timeout-minutes: 5 + working-directory: terraform/python/eks + run: | + terraform destroy -auto-approve \ + -var="test_id=${{ env.TESTING_ID }}" \ + -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ + -var="kube_directory_path=${{ github.workspace }}/.kube" \ + -var="eks_cluster_name=${{ inputs.test-cluster-name }}" \ + -var="test_namespace=${{ env.SAMPLE_APP_NAMESPACE }}" \ + -var="service_account_aws_access=service-account-${{ env.TESTING_ID }}" \ + -var="python_app_image=${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}.dkr.ecr.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/${{ secrets.APP_SIGNALS_PYTHON_E2E_FE_SA_IMG }}" \ + -var="python_remote_app_image=${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}.dkr.ecr.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/${{ secrets.APP_SIGNALS_PYTHON_E2E_RE_SA_IMG }}" + + - name: Remove aws access service account + if: always() + continue-on-error: true + run: | + eksctl delete iamserviceaccount \ + --name service-account-${{ env.TESTING_ID }} \ + --namespace ${{ env.SAMPLE_APP_NAMESPACE }} \ + --cluster ${{ inputs.test-cluster-name }} \ + --region ${{ env.AWS_DEFAULT_REGION }} diff --git a/.github/workflows/clean-aws-resources.yml b/.github/workflows/clean-aws-resources.yml index 3e054528c1..ef308ec7d9 100644 --- a/.github/workflows/clean-aws-resources.yml +++ b/.github/workflows/clean-aws-resources.yml @@ -136,9 +136,28 @@ jobs: role-to-assume: ${{ secrets[matrix.role_secret] }} aws-region: ${{ matrix.region }} - - name: Clean old dedicated host + - name: Clean old host + working-directory: tool/clean + run: go run ./clean_host/clean_host.go ${{ matrix.region }} + + clean-hosts-china: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read 
+ steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + aws-region: "cn-north-1" + + - name: Clean old hosts working-directory: tool/clean - run: go run ./clean_host/clean_host.go --tags=clean + run: go run ./clean_host/clean_host.go cn-north-1 clean-ecs-clusters: runs-on: ubuntu-latest @@ -177,6 +196,24 @@ jobs: - name: Clean old eks cluster working-directory: tool/clean run: go run ./clean_eks/clean_eks.go --tags=clean + clean-ebs-volumes: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Clean old unused ebs volumes + working-directory: tool/clean + run: go run ./clean_ebs/clean_ebs.go --tags=clean clean-asg: runs-on: ubuntu-latest diff --git a/.github/workflows/deploy-canary.yml b/.github/workflows/deploy-canary.yml new file mode 100644 index 0000000000..5a30401ed2 --- /dev/null +++ b/.github/workflows/deploy-canary.yml @@ -0,0 +1,88 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: Deploy Canary +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} + KEY_NAME: ${{ secrets.KEY_NAME }} + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" + CWA_GITHUB_TEST_REPO_BRANCH: "main" + +on: + schedule: + - cron: "0 15 * * *" # Run daily at 15:00 UTC + workflow_call: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + DeployCanary: + name: "DeployCanary" + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Terminate Last Canary + run: | + if aws s3api wait object-exists --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate ; + then + cd terraform/ec2/linux + aws s3 cp s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate . 
+ terraform --version + terraform init + terraform destroy -auto-approve + aws s3api delete-object --bucket ${S3_INTEGRATION_BUCKET} --key canary/al2/terraform.tfstate + fi + # @TODO we can add a matrix in the future but for alpha we will only deploy to al2 + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/linux + terraform init + if terraform apply --auto-approve \ + -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ + -var="user=ec2-user" \ + -var="ami=cloudwatch-agent-integration-test-al2*" \ + -var="arc=amd64" \ + -var="binary_name=amazon-cloudwatch-agent.rpm" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="test_name=canary" \ + -var="is_canary=true" \ + -var="test_dir=./test/canary" ; then aws s3 cp terraform.tfstate s3://${S3_INTEGRATION_BUCKET}/canary/al2/terraform.tfstate + else + terraform destroy -auto-approve && exit 1 + fi + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/linux && terraform destroy --auto-approve \ No newline at end of file diff --git a/.github/workflows/ec2-integration-test.yml b/.github/workflows/ec2-integration-test.yml index f3d4db89b7..45cfb4ccb4 100644 --- a/.github/workflows/ec2-integration-test.yml +++ b/.github/workflows/ec2-integration-test.yml @@ -5,9 +5,8 @@ name: Reusable EC2 Integration Test env: PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} - TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} KEY_NAME: ${{ secrets.KEY_NAME }} + TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours on: workflow_call: @@ 
-35,6 +34,12 @@ on: type: string localstack_host: type: string + region: + type: string + terraform_assume_role: + type: string + s3_integration_bucket: + type: string jobs: EC2IntegrationTest: @@ -56,19 +61,21 @@ jobs: - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: - role-to-assume: ${{env.TERRAFORM_AWS_ASSUME_ROLE}} - aws-region: us-west-2 + role-to-assume: ${{ inputs.terraform_assume_role }} + aws-region: ${{inputs.region}} + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - name: Cache if success id: cache_if_success uses: actions/cache@v3 with: path: go.mod - key: cache_if_success-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.arc }}-${{ matrix.arrays.test_dir }} + key: ${{inputs.region}}-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.arc }}-${{ matrix.arrays.test_dir }} - name: Echo Test Info - run: echo run cache_if_success os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} - + run: | + echo run cache_if_success os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} + echo localstack input ${{ inputs.localstack_host }} - name: Verify Terraform version run: terraform --version @@ -100,14 +107,15 @@ jobs: -var="arc=${{ matrix.arrays.arc }}" \ -var="binary_name=${{ matrix.arrays.binaryName }}" \ -var="local_stack_host_name=${{ inputs.localstack_host }}" \ - -var="s3_bucket=${{env.S3_INTEGRATION_BUCKET}}" \ + -var="region=${{ inputs.region }}" \ + -var="s3_bucket=${{ inputs.s3_integration_bucket }}" \ -var="plugin_tests='${{ github.event.inputs.plugins }}'" \ -var="excluded_tests='${{ matrix.arrays.excludedTests }}'" \ -var="ssh_key_name=${{env.KEY_NAME}}" \ -var="test_dir=${{ matrix.arrays.test_dir }}" \ - -var="agent_start=${{ matrix.arrays.agentStartCommand }}"; then terraform destroy -auto-approve + -var="agent_start=${{ matrix.arrays.agentStartCommand }}"; then terraform destroy 
-var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" -auto-approve else - terraform destroy -auto-approve && exit 1 + terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" -auto-approve && exit 1 fi #This is here just in case workflow cancel - name: Terraform destroy @@ -117,4 +125,4 @@ jobs: max_attempts: 3 timeout_minutes: 8 retry_wait_seconds: 5 - command: cd ${{ inputs.test_dir }} && terraform destroy --auto-approve \ No newline at end of file + command: cd ${{ inputs.test_dir }} && terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" --auto-approve \ No newline at end of file diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 47a548b496..4c806cba83 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -4,9 +4,9 @@ name: Run Integration Tests env: PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} - TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours - S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} + S3_INTEGRATION_BUCKET: ${{ vars.S3_INTEGRATION_BUCKET }} KEY_NAME: ${{ secrets.KEY_NAME }} CF_IAM_ROLE: ${{ secrets.CF_IAM_ROLE }} CF_KEY_NAME: ${{ secrets.CF_KEY_NAME }} @@ -14,6 +14,10 @@ env: CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" CWA_GITHUB_TEST_REPO_BRANCH: "main" + TERRAFORM_AWS_ASSUME_ROLE_ITAR: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + S3_INTEGRATION_BUCKET_ITAR: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + TERRAFORM_AWS_ASSUME_ROLE_CN: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + S3_INTEGRATION_BUCKET_CN: ${{ vars.S3_INTEGRATION_BUCKET_CN }} on: push: @@ -43,6 +47,32 @@ jobs: BuildAndUpload: uses: ./.github/workflows/test-build.yml 
secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildAndUploadPackages: + uses: ./.github/workflows/test-build-packages.yml + needs: [BuildAndUpload] + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildDocker: + needs: [BuildAndUpload] + uses: ./.github/workflows/test-build-docker.yml + secrets: inherit permissions: id-token: write contents: read @@ -51,6 +81,32 @@ jobs: BucketKey: "integration-test/binary/${{ github.sha }}" PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + BuildAndUploadITAR: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + Region: "us-gov-east-1" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + + BuildAndUploadCN: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + Region: "cn-north-1" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + GenerateTestMatrix: name: 'GenerateTestMatrix' runs-on: ubuntu-latest @@ -67,6 +123,9 @@ jobs: ecs_fargate_matrix: ${{ 
steps.set-matrix.outputs.ecs_fargate_matrix }} eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }} eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }} + ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }} + ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }} + steps: - uses: actions/checkout@v3 with: @@ -76,7 +135,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 - name: Generate matrix id: set-matrix @@ -94,6 +153,8 @@ jobs: echo "::set-output name=ecs_fargate_matrix::$(echo $(cat generator/resources/ecs_fargate_complete_test_matrix.json))" echo "::set-output name=eks_daemon_matrix::$(echo $(cat generator/resources/eks_daemon_complete_test_matrix.json))" echo "::set-output name=eks_deployment_matrix::$(echo $(cat generator/resources/eks_deployment_complete_test_matrix.json))" + echo "::set-output name=ec2_linux_itar_matrix::$(echo $(cat generator/resources/ec2_linux_itar_complete_test_matrix.json))" + echo "::set-output name=ec2_linux_china_matrix::$(echo $(cat generator/resources/ec2_linux_china_complete_test_matrix.json))" - name: Echo test plan matrix run: | @@ -109,6 +170,8 @@ jobs: echo "ecs_fargate_matrix: ${{ steps.set-matrix.outputs.ecs_fargate_matrix }}" echo "eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }}" echo "eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }}" + echo "ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }}" + echo "ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }}" CloudformationTest: needs: [BuildAndUpload, GenerateTestMatrix] @@ -129,7 +192,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v2 with: - go-version: ~1.21.1 + go-version: ~1.22.2 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 @@ -153,48 +216,56 @@ jobs: StartLocalStack: name: 
'StartLocalStack' - runs-on: ubuntu-latest - defaults: - run: - working-directory: terraform/ec2/localstack - outputs: - local_stack_host_name: ${{ steps.localstack.outputs.local_stack_host_name }} + needs: [OutputEnvVariables] + uses: ./.github/workflows/start-localstack.yml + secrets: inherit permissions: id-token: write contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + with: + region: us-west-2 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + StartLocalStackITAR: + name: 'StartLocalStackITAR' + needs: [OutputEnvVariables] + uses: ./.github/workflows/start-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-gov-east-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} - - name: Verify Terraform version - run: terraform --version + StartLocalStackCN: + name: 'StartLocalStackCN' + needs: [ OutputEnvVariables ] + uses: ./.github/workflows/start-localstack.yml + 
secrets: inherit + permissions: + id-token: write + contents: read + with: + region: cn-north-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} - - name: Terraform init - run: terraform init - - name: Terraform apply - id: localstack - run: > - echo run terraform and execute test code && - terraform apply --auto-approve - -var="ssh_key_value=${PRIVATE_KEY}" - -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" - -var="cwa_github_sha=${GITHUB_SHA}" - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" - -var="ssh_key_name=${KEY_NAME}" && - LOCAL_STACK_HOST_NAME=$(terraform output -raw public_dns) && - echo $LOCAL_STACK_HOST_NAME && - echo "::set-output name=local_stack_host_name::$LOCAL_STACK_HOST_NAME" && - aws s3 cp terraform.tfstate s3://${S3_INTEGRATION_BUCKET}/integration-test/local-stack-terraform-state/${GITHUB_SHA}/terraform.tfstate EC2NvidiaGPUIntegrationTest: needs: [ BuildAndUpload, StartLocalStack, GenerateTestMatrix ] @@ -327,7 +398,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 - name: SetOutputs id: set-outputs @@ -355,8 +426,48 @@ jobs: test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} localstack_host: ${{needs.StartLocalStack.outputs.local_stack_host_name}} + region: us-west-2 + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + secrets: inherit + + EC2LinuxIntegrationTestITAR: + needs: [ BuildAndUpload, BuildAndUploadITAR, StartLocalStackITAR, 
GenerateTestMatrix, OutputEnvVariables ] + name: 'EC2LinuxITAR' + uses: ./.github/workflows/ec2-integration-test.yml + with: + github_sha: ${{github.sha}} + test_dir: terraform/ec2/linux + job_id: ec2-linux-integration-test + test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_itar_matrix}} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{needs.StartLocalStackITAR.outputs.local_stack_host_name}} + region: us-gov-east-1 + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + secrets: inherit + + EC2LinuxIntegrationTestCN: + needs: [ BuildAndUpload, BuildAndUploadCN, StartLocalStackCN, GenerateTestMatrix, OutputEnvVariables ] + name: 'EC2LinuxCN' + uses: ./.github/workflows/ec2-integration-test.yml + with: + github_sha: ${{github.sha}} + test_dir: terraform/ec2/linux + job_id: ec2-linux-integration-test + test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_china_matrix}} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{needs.StartLocalStackCN.outputs.local_stack_host_name}} + region: cn-north-1 + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} secrets: inherit + LinuxOnPremIntegrationTest: needs: [BuildAndUpload, StartLocalStack, GenerateTestMatrix, OutputEnvVariables] name: 'OnpremLinux' @@ -370,10 +481,11 @@ jobs: test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} test_repo_branch: ${{ 
needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} localstack_host: ${{needs.StartLocalStack.outputs.local_stack_host_name}} + region: us-west-2 secrets: inherit EC2WinIntegrationTest: - needs: [BuildAndUpload, GenerateTestMatrix] + needs: [BuildAndUpload, BuildAndUploadPackages, GenerateTestMatrix] name: 'EC2WinIntegrationTest' runs-on: ubuntu-latest strategy: @@ -455,7 +567,7 @@ jobs: terraform destroy --auto-approve EC2DarwinIntegrationTest: - needs: [BuildAndUpload, GenerateTestMatrix] + needs: [BuildAndUpload, BuildAndUploadPackages, GenerateTestMatrix] name: 'EC2DarwinIntegrationTest' runs-on: ubuntu-latest strategy: @@ -537,44 +649,59 @@ jobs: StopLocalStack: name: 'StopLocalStack' - runs-on: ubuntu-latest if: ${{ always() }} - needs: [ StartLocalStack, EC2LinuxIntegrationTest, LinuxOnPremIntegrationTest ] - defaults: - run: - working-directory: terraform/ec2/localstack + needs: [ StartLocalStack, EC2LinuxIntegrationTest, LinuxOnPremIntegrationTest, OutputEnvVariables ] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit permissions: id-token: write contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Copy state - run: aws s3 cp s3://${S3_INTEGRATION_BUCKET}/integration-test/local-stack-terraform-state/${GITHUB_SHA}/terraform.tfstate . 
- - - name: Verify Terraform version - run: terraform --version + with: + region: us-west-2 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - - name: Terraform init - run: terraform init + StopLocalStackITAR: + name: 'StopLocalStackITAR' + if: ${{ always() }} + needs: [ EC2LinuxIntegrationTestITAR, OutputEnvVariables ] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-gov-east-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} - - name: Terraform destroy - run: terraform destroy --auto-approve + StopLocalStackCN: + name: 'StopLocalStackCN' + if: ${{ always() }} + needs: [ EC2LinuxIntegrationTestCN ] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: cn-north-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} ECSEC2IntegrationTest: name: 'ECSEC2IntegrationTest' runs-on: ubuntu-latest - needs: [ BuildAndUpload, GenerateTestMatrix ] + needs: [ BuildAndUpload, BuildDocker, GenerateTestMatrix ] strategy: fail-fast: false matrix: @@ -657,7 +784,7 @@ jobs: 
ECSFargateIntegrationTest: name: 'ECSFargateIntegrationTest' runs-on: ubuntu-latest - needs: [BuildAndUpload, GenerateTestMatrix] + needs: [BuildAndUpload, BuildDocker, GenerateTestMatrix] strategy: fail-fast: false matrix: @@ -735,7 +862,7 @@ jobs: EKSIntegrationTest: name: 'EKSIntegrationTest' runs-on: ubuntu-latest - needs: [ BuildAndUpload, GenerateTestMatrix ] + needs: [ BuildAndUpload, BuildDocker, GenerateTestMatrix ] strategy: fail-fast: false matrix: @@ -761,7 +888,7 @@ jobs: uses: actions/cache@v3 with: path: go.mod - key: eks-ec2-integration-test-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.test_dir }} + key: ${{ matrix.arrays.terraform_dir }}-${{ matrix.arrays.k8s_version }}-${{ matrix.arrays.instanceType }}-${{ github.sha }}-${{ matrix.arrays.os }}-${{ matrix.arrays.test_dir }} - name: Login ECR id: login-ecr @@ -776,8 +903,8 @@ jobs: if: steps.eks-ec2-integration-test.outputs.cache-hit != 'true' uses: nick-fields/retry@v2 with: - max_attempts: 3 - timeout_minutes: 60 # EKS takes about 20 minutes to spin up a cluster and service on the cluster + max_attempts: 2 + timeout_minutes: 90 # EKS takes about 20 minutes to spin up a cluster and service on the cluster retry_wait_seconds: 5 command: | if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then @@ -817,7 +944,7 @@ jobs: EKSPrometheusIntegrationTest: name: 'EKSPrometheusIntegrationTest' runs-on: ubuntu-latest - needs: [ BuildAndUpload, GenerateTestMatrix ] + needs: [ BuildAndUpload, BuildDocker, GenerateTestMatrix ] strategy: fail-fast: false matrix: @@ -964,7 +1091,7 @@ jobs: EC2WinPerformanceTest: name: "EC2WinPerformanceTest" - needs: [ BuildAndUpload, GenerateTestMatrix ] + needs: [ BuildAndUpload, BuildAndUploadPackages, GenerateTestMatrix ] runs-on: ubuntu-latest strategy: fail-fast: false @@ -1102,7 +1229,7 @@ jobs: EC2WinStressTrackingTest: name: "EC2WinStressTrackingTest" - needs: [BuildAndUpload, GenerateTestMatrix] + needs: [BuildAndUpload, BuildAndUploadPackages, 
GenerateTestMatrix] runs-on: ubuntu-latest strategy: fail-fast: false @@ -1170,22 +1297,60 @@ jobs: timeout_minutes: 8 retry_wait_seconds: 5 command: cd terraform/stress && terraform destroy --auto-approve - - EKSEndToEndTest: + + JavaEKSEndToEndTest: name: "AppSignals E2E EKS Test" - needs: [ BuildAndUpload ] - uses: ./.github/workflows/appsignals-e2e-eks-test.yml + needs: [ BuildAndUpload, BuildDocker ] + uses: ./.github/workflows/application-signals-java-e2e-eks-test.yml permissions: id-token: write contents: read secrets: inherit with: test-cluster-name: 'e2e-cw-agent-test' - - EC2EndToEndTest: + + JavaEC2EndToEndTest: + name: "AppSignals E2E EC2 Test" + needs: [ BuildAndUpload, BuildDocker ] + uses: ./.github/workflows/application-signals-java-e2e-ec2-test.yml + permissions: + id-token: write + contents: read + secrets: inherit + + PythonEKSEndToEndTest: + name: "AppSignals E2E EKS Test" + needs: [ BuildAndUpload, BuildDocker, JavaEKSEndToEndTest ] + uses: ./.github/workflows/application-signals-python-e2e-eks-test.yml + permissions: + id-token: write + contents: read + secrets: inherit + with: + test-cluster-name: 'e2e-cw-agent-test' + + PythonEC2EndToEndTest: + name: "AppSignals E2E EC2 Test" + needs: [ BuildAndUpload, BuildDocker, JavaEC2EndToEndTest ] + uses: ./.github/workflows/application-signals-python-e2e-ec2-test.yml + permissions: + id-token: write + contents: read + secrets: inherit + + JavaEC2ASGEndToEndTest: + name: "AppSignals E2E EC2 Test" + needs: [ BuildAndUpload, BuildDocker ] + uses: ./.github/workflows/application-signals-java-e2e-ec2-asg-test.yml + permissions: + id-token: write + contents: read + secrets: inherit + + PythonEC2ASGEndToEndTest: name: "AppSignals E2E EC2 Test" - needs: [ BuildAndUpload ] - uses: ./.github/workflows/appsignals-e2e-ec2-test.yml + needs: [ BuildAndUpload, BuildDocker ] + uses: ./.github/workflows/application-signals-python-e2e-ec2-asg-test.yml permissions: id-token: write contents: read diff --git 
a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index d592b09681..3716d9cdd0 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -9,6 +9,32 @@ jobs: BuildAndUpload: uses: ./.github/workflows/test-build.yml secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "nightly-build/latest" + PackageBucketKey: "nightly-build/latest" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildAndUploadPackages: + uses: ./.github/workflows/test-build-packages.yml + needs: [ BuildAndUpload ] + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "nightly-build/latest" + PackageBucketKey: "nightly-build/latest" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildDocker: + uses: ./.github/workflows/test-build-docker.yml + needs: [ BuildAndUpload ] + secrets: inherit permissions: id-token: write contents: read diff --git a/.github/workflows/otel-fork-replace.yml b/.github/workflows/otel-fork-replace.yml index 553eed7d48..60499ef2f5 100644 --- a/.github/workflows/otel-fork-replace.yml +++ b/.github/workflows/otel-fork-replace.yml @@ -30,7 +30,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 cache: false - name: Update OTel fork components version id: set-matrix diff --git a/.github/workflows/soak-test.yml b/.github/workflows/soak-test.yml new file mode 100644 index 0000000000..abb9529cdc --- /dev/null +++ b/.github/workflows/soak-test.yml @@ -0,0 +1,114 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: Soak Test +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + S3_INTEGRATION_BUCKET: ${{ secrets.S3_INTEGRATION_BUCKET }} + KEY_NAME: ${{ secrets.KEY_NAME }} + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" + CWA_GITHUB_TEST_REPO_BRANCH: "main" + +on: + schedule: + # Run at midnight on Sunday (once a week) + - cron: "0 0 * * 0" + workflow_call: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/binary/${{ github.sha }}" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildAndUploadPackages: + uses: ./.github/workflows/test-build-packages.yml + needs: [BuildAndUpload] + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + BuildDocker: + needs: [BuildAndUpload] + uses: ./.github/workflows/test-build-docker.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "cwagent-integration-test:${{ github.sha }}" + BucketKey: "integration-test/binary/${{ github.sha }}" + PackageBucketKey: "integration-test/packaging/${{ github.sha }}" + + DeploySoakTest: + name: "DeploySoakTest" + needs: [BuildAndUpload, BuildAndUploadPackages, BuildDocker] + runs-on: ubuntu-latest + permissions: + 
id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + # @TODO we can add a matrix in the future but for now, we will only deploy to AL2. + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/linux + terraform init + terraform apply --auto-approve \ + -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ + -var="cwa_github_sha=${GITHUB_SHA}" \ + -var="user=ec2-user" \ + -var="ami=cloudwatch-agent-integration-test-al2*" \ + -var="arc=amd64" \ + -var="binary_name=amazon-cloudwatch-agent.rpm" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="test_name=SoakTest" \ + -var="test_dir=./test/soak -run TestSoakHigh" + + #This is here just in case the workflow is cancelled + - name: Terraform destroy + if: ${{ cancelled() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/linux && terraform destroy --auto-approve diff --git a/.github/workflows/start-localstack.yml b/.github/workflows/start-localstack.yml new file mode 100644 index 0000000000..6dc1096121 --- /dev/null +++ b/.github/workflows/start-localstack.yml @@ -0,0 +1,84 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: Start Localstack + +env: + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + KEY_NAME: ${{ secrets.KEY_NAME }} + +on: + workflow_call: + inputs: + region: + type: string + test_repo_name: + required: true + type: string + test_repo_branch: + required: true + type: string + terraform_assume_role: + type: string + test_repo_url: + required: true + type: string + github_sha: + required: true + type: string + s3_integration_bucket: + type: string + outputs: + local_stack_host_name: + value: ${{ jobs.StartLocalStack.outputs.local_stack_host_name }} + + +jobs: + StartLocalStack: + name: 'StartLocalStack' + runs-on: ubuntu-latest + defaults: + run: + working-directory: terraform/ec2/localstack + outputs: + local_stack_host_name: ${{ steps.localstack.outputs.local_stack_host_name }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ inputs.test_repo_name }} + ref: ${{ inputs.test_repo_branch }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ inputs.terraform_assume_role }} + aws-region: ${{ inputs.region }} + + - name: Echo Localstack Config + run: echo repo name ${{inputs.test_repo_name}} repo branch ${{ inputs.test_repo_branch }} region ${{ inputs.region }} + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform init + run: terraform init + + - name: Terraform apply + id: localstack + run: > + echo run terraform and execute test code && + terraform apply --auto-approve + -var="ssh_key_value=${{env.PRIVATE_KEY}}" + -var="github_test_repo=${{inputs.test_repo_url}}" + -var="github_test_repo_branch=${{inputs.test_repo_branch}}" + -var="cwa_github_sha=${{inputs.github_sha}}" + -var="s3_bucket=${{inputs.s3_integration_bucket}}" + -var="region=${{inputs.region}}" + -var="ssh_key_name=${{env.KEY_NAME}}" && + LOCAL_STACK_HOST_NAME=$(terraform output -raw public_dns) && + echo 
$LOCAL_STACK_HOST_NAME && + echo "::set-output name=local_stack_host_name::$LOCAL_STACK_HOST_NAME" && + aws s3 cp terraform.tfstate s3://${{inputs.s3_integration_bucket}}/integration-test/local-stack-terraform-state/${{inputs.github_sha}}/terraform.tfstate diff --git a/.github/workflows/stop-localstack.yml b/.github/workflows/stop-localstack.yml new file mode 100644 index 0000000000..248e3e85ca --- /dev/null +++ b/.github/workflows/stop-localstack.yml @@ -0,0 +1,63 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Stop Localstack + +env: + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + KEY_NAME: ${{ secrets.KEY_NAME }} + +on: + workflow_call: + inputs: + region: + type: string + test_repo_name: + required: true + type: string + test_repo_branch: + required: true + type: string + terraform_assume_role: + type: string + github_sha: + required: true + type: string + s3_integration_bucket: + type: string + + +jobs: + StopLocalStack: + name: 'StopLocalStack' + runs-on: ubuntu-latest + if: ${{ always() }} + defaults: + run: + working-directory: terraform/ec2/localstack + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{inputs.test_repo_name}} + ref: ${{inputs.test_repo_branch}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ inputs.terraform_assume_role }} + aws-region: ${{ inputs.region }} + + - name: Copy state + run: aws s3 cp s3://${{inputs.s3_integration_bucket}}/integration-test/local-stack-terraform-state/${{inputs.github_sha}}/terraform.tfstate . 
+ + - name: Verify Terraform version + run: terraform --version + + - name: Terraform init + run: terraform init + + - name: Terraform destroy + run: terraform destroy -var="region=${{ inputs.region }}" --auto-approve diff --git a/.github/workflows/test-build-docker.yml b/.github/workflows/test-build-docker.yml new file mode 100644 index 0000000000..6d7e5df113 --- /dev/null +++ b/.github/workflows/test-build-docker.yml @@ -0,0 +1,438 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Build And Upload Docker Image +env: + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + +on: + workflow_dispatch: + inputs: + ContainerRepositoryNameAndTag: + # e.g. "cwagent-integration-test:SHA" + # e.g. "cwa-release:latest" + # e.g. "cwa_nonprod:latest" + description: "ECR repo name and tag" + required: true + type: string + BucketKey: + # e.g. s3:///integration-test/binary/" + # e.g. s3:///nonprod + # e.g. s3:///release + description: "S3 URI to upload artifacts into." + required: true + type: string + PackageBucketKey: + description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." + required: true + type: string + workflow_call: + inputs: + ContainerRepositoryNameAndTag: + # e.g. "cwagent-integration-test:SHA" + # e.g. "cwa-release:latest" + # e.g. "cwa_nonprod:latest" + description: "ECR repo name and tag" + required: true + type: string + BucketKey: + # e.g. s3:///integration-test/binary/" + # e.g. s3:///nonprod + # e.g. s3:///release + description: "S3 URI to upload artifacts into." + required: true + type: string + PackageBucketKey: + description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." 
+ required: true + type: string + +jobs: + MakeBinary: + name: 'MakeBinary' + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache container + id: cached_container + uses: actions/cache@v3 + with: + key: "cached_container_${{ github.sha }}" + path: go.mod + + - name: Login ECR + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + - name: Set up Docker Buildx + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + uses: docker/setup-buildx-action@v1 + + - name: Set up QEMU + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + uses: docker/setup-qemu-action@v1 + + # Build dir is ignored in our .dockerignore thus need to copy to another dir. 
+ - name: Copy Binary For Agent Image Build + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + run: | + mkdir amd64 + mkdir arm64 + aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/linux/amd64/amazon-cloudwatch-agent.deb amd64/amazon-cloudwatch-agent.deb + aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/linux/arm64/amazon-cloudwatch-agent.deb arm64/amazon-cloudwatch-agent.deb + + - name: Get ECR Repo name + id: repo_name + env: + ContainerRepositoryNameAndTag: ${{ inputs.ContainerRepositoryNameAndTag }} + run: | + RepoName=`echo $ContainerRepositoryNameAndTag | awk -F: '{print $1}'` + echo "::set-output name=ContainerRepositoryName::$RepoName" + + - name: Build Cloudwatch Agent Image amd64 + uses: docker/build-push-action@v4 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + with: + file: amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localdeb/Dockerfile + context: . + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ steps.repo_name.outputs.ContainerRepositoryName }}:linux-amd64 + platforms: linux/amd64 + + - name: Build Cloudwatch Agent Image arm64 + uses: docker/build-push-action@v4 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_container.outputs.cache-hit == false + with: + file: amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localdeb/Dockerfile + context: . 
+ push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ steps.repo_name.outputs.ContainerRepositoryName }}:linux-arm64 + platforms: linux/arm64 + + MakeMSIZip: + name: 'MakeMSIZip' + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + + - name: Set up Go 1.x + uses: actions/setup-go@v4 + with: + go-version: ~1.22.2 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache win zip + id: cached_win_zip + uses: actions/cache@v3 + with: + key: "cached_win_zip_${{ github.sha }}_${{ inputs.PackageBucketKey }}_${{ inputs.Bucket }}_${{ inputs.BucketKey }}" + path: go.mod + + - name: Copy binary + if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false + run: | + aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }} . --recursive + - name: Unzip + if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false + run: | + sudo apt install unzip + unzip windows/amd64/amazon-cloudwatch-agent.zip -d windows-agent + - name: Create msi dep folder and copy deps + if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false + run: | + export version=$(cat CWAGENT_VERSION) + echo cw agent version $version + mkdir msi_dep + cp -r msi/tools/. msi_dep/ + cp -r windows-agent/amazon-cloudwatch-agent/. 
msi_dep/ + go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/amazon-cloudwatch-agent.wxs '' + go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/manifest.json __VERSION__ + + - name: Zip + if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false + run: | + sudo apt install zip + zip buildMSI.zip msi_dep/* + + - name: Upload zip + if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false + run: aws s3 cp buildMSI.zip s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/buildMSI.zip + + BuildMSI-2022: + name: 'BuildMSI-2022' + runs-on: windows-latest + needs: [ MakeMSIZip ] + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache msi + id: cached_msi + uses: actions/cache@v3 + with: + key: "cached_msi_${{ github.sha }}" + path: go.mod + + # Using the env variable returns "" for bucket name thus use the secret + - name: Copy msi + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/buildMSI.zip . 
+ + - name: Create msi + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: | + curl -OLS https://github.com/wixtoolset/wix3/releases/download/wix314rtm/wix314.exe + .\wix314.exe /install /quiet /norestart + $wixToolsetBinPath = ";C:\Program Files (x86)\WiX Toolset v3.14\bin;" + $env:PATH = $env:PATH + $wixToolsetBinPath + Expand-Archive buildMSI.zip -Force + cd buildMSI/msi_dep + .\create_msi.ps1 "nosha" ${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }} + + - name: clean ecr login credential cache + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run : | + echo '{"auths": {"https://index.docker.io/v1/": {}}, "HttpHeaders": { "User-Agent": "Docker-Client/19.03.12 (windows)"}}' > ~/.docker/config.json + + - name: Login ECR + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + # Build dir is ignored in our .dockerignore thus need to copy to another dir. 
+ - name: Copy Binary For Agent Image Build + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: | + pwd + mkdir amd64 + cp -r buildMSI/msi_dep/amazon-cloudwatch-agent.msi amd64/ + + - name: Get ECR Repo name + id: repo_name + env: + ContainerRepositoryNameAndTag: ${{ inputs.ContainerRepositoryNameAndTag }} + run: | + $splitArray = $env:ContainerRepositoryNameAndTag.Split(":")[0] + Write-Output "::set-output name=ContainerRepositoryName::$splitArray" + + - name: Build Windows Cloudwatch Agent Image + env: + REGISTRY: ${{ steps.login-ecr.outputs.registry }} + REPOSITORY: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:2022 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: | + Write-Output "$env:REGISTRY/$env:REPOSITORY" + docker build --platform windows/amd64 -f ./amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows . -t $env:REGISTRY/$env:REPOSITORY + docker push $env:REGISTRY/$env:REPOSITORY + + BuildMSI-2019: + name: 'BuildMSI-2019' + runs-on: windows-2019 + needs: [MakeMSIZip] + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache msi + id: cached_msi + uses: actions/cache@v3 + with: + key: "cached_msi_${{ github.sha }}" + path: go.mod + + # Using the env variable returns "" for bucket name thus use the secret + - name: Copy msi + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/buildMSI.zip . 
+ + - name: Create msi + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run : | + curl -OLS https://github.com/wixtoolset/wix3/releases/download/wix314rtm/wix314.exe + .\wix314.exe /install /quiet /norestart + $wixToolsetBinPath = ";C:\Program Files (x86)\WiX Toolset v3.14\bin;" + $env:PATH = $env:PATH + $wixToolsetBinPath + Expand-Archive buildMSI.zip -Force + cd buildMSI/msi_dep + .\create_msi.ps1 "nosha" ${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }} + + - name: clean ecr login credential cache + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run : | + echo '{"auths": {"https://index.docker.io/v1/": {}}, "HttpHeaders": { "User-Agent": "Docker-Client/19.03.12 (windows)"}}' > ~/.docker/config.json + + - name: Login ECR + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + # Build dir is ignored in our .dockerignore thus need to copy to another dir. 
+ - name: Copy Binary For Agent Image Build + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: | + pwd + mkdir amd64 + cp -r buildMSI/msi_dep/amazon-cloudwatch-agent.msi amd64/ + + - name: Get ECR Repo name + id: repo_name + env: + ContainerRepositoryNameAndTag: ${{ inputs.ContainerRepositoryNameAndTag }} + run: | + $splitArray = $env:ContainerRepositoryNameAndTag.Split(":")[0] + Write-Output "::set-output name=ContainerRepositoryName::$splitArray" + + - name: Build Windows Cloudwatch Agent Image + env: + REGISTRY: ${{ steps.login-ecr.outputs.registry }} + REPOSITORY: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:2019 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false + run: | + Write-Output "$env:REGISTRY/$env:REPOSITORY" + docker build --platform windows/amd64 -f ./amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows --build-arg IMAGE_TAG=ltsc2019 . 
-t $env:REGISTRY/$env:REPOSITORY + docker push $env:REGISTRY/$env:REPOSITORY + + CreateContainerManifest: + name: 'CreateManifest' + needs: ['BuildMSI-2019', 'BuildMSI-2022', 'MakeBinary'] + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Install rpm + run: sudo apt install rpm + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Login ECR + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + - name: Set up Docker Buildx + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + uses: docker/setup-buildx-action@v1 + + - name: Get ECR Repo name + id: repo_name + env: + ContainerRepositoryNameAndTag: ${{ inputs.ContainerRepositoryNameAndTag }} + run: | + RepoName=`echo $ContainerRepositoryNameAndTag | awk -F: '{print $1}'` + echo "::set-output name=ContainerRepositoryName::$RepoName" + + - name: Create manifest and push + env: + REGISTRY: ${{ steps.login-ecr.outputs.registry }} + OrigREPOSITORY: ${{ inputs.ContainerRepositoryNameAndTag }} + REPOSITORY: ${{ steps.repo_name.outputs.ContainerRepositoryName }} + REPOSITORYWindows: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:windows + REPO2022: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:2022 + REPO2019: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:2019 + REPOLinuxAmd: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:linux-amd64 + REPOLinuxArm: ${{ steps.repo_name.outputs.ContainerRepositoryName }}:linux-arm64 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + run: | + docker manifest create $REGISTRY/$REPOSITORYWindows --amend $REGISTRY/$REPO2022 
--amend $REGISTRY/$REPO2019 + docker manifest push $REGISTRY/$REPOSITORYWindows + + docker buildx imagetools inspect --raw $REGISTRY/$REPOLinuxAmd | jq '.manifests[0]' > linux-amd.json + docker buildx imagetools inspect --raw $REGISTRY/$REPOLinuxArm | jq '.manifests[0]' > linux-arm.json + docker buildx imagetools inspect --raw $REGISTRY/$REPOSITORYWindows | jq '.manifests[0]' > 2019.json + docker buildx imagetools inspect --raw $REGISTRY/$REPOSITORYWindows | jq '.manifests[1]' > 2022.json + + docker buildx imagetools create -f linux-amd.json -f linux-arm.json -f 2019.json -f 2022.json --tag $REGISTRY/$OrigREPOSITORY + + #GH actions set up gpg only works on ubuntu as of this commit date + GPGSignWindowsPackage: + name: 'GPGSignWindowsPackage' + runs-on: ubuntu-latest + needs: [ BuildMSI-2022 ] + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Cache sig + id: cached_sig + uses: actions/cache@v3 + with: + key: "cached_sig_${{ github.sha }}" + path: go.mod + + - name: Download from s3 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false + run: | + mkdir -p packages/amd64 + mkdir packages/arm64 + aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amazon-cloudwatch-agent.msi ./packages/amazon-cloudwatch-agent.msi + - name: Import GPG Key + uses: crazy-max/ghaction-import-gpg@v5 + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.PASSPHRASE }} + + - name: Sign Build Files + run: for f in $(find packages/); do if [ ! 
-d $f ]; then echo "Signing file $f" && gpg --detach-sign $f ; fi ; done + + - name: Upload to s3 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false + run: | + aws s3 cp packages/amazon-cloudwatch-agent.msi.sig s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amazon-cloudwatch-agent.msi.sig diff --git a/.github/workflows/test-build-packages.yml b/.github/workflows/test-build-packages.yml new file mode 100644 index 0000000000..12673c19d3 --- /dev/null +++ b/.github/workflows/test-build-packages.yml @@ -0,0 +1,182 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Build And Upload Packages +env: + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + +on: + workflow_dispatch: + inputs: + BucketKey: + # e.g. s3:///integration-test/binary/" + # e.g. s3:///nonprod + # e.g. s3:///release + description: "S3 URI to upload artifacts into." + required: true + type: string + PackageBucketKey: + description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." + required: true + type: string + Region: + description: "Region to upload binaries" + required: false + type: string + default: "us-west-2" + TerraformAWSAssumeRole: + description: "Role to assume to upload artifacts" + required: true + type: string + Bucket: + description: "Bucket to upload the artifacts to" + required: true + type: string + workflow_call: + inputs: + BucketKey: + # e.g. s3:///integration-test/binary/" + # e.g. s3:///nonprod + # e.g. s3:///release + description: "S3 URI to upload artifacts into." + required: true + type: string + PackageBucketKey: + description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." 
+ required: true + type: string + Region: + description: "Region to upload binaries" + required: false + type: string + default: "us-west-2" + TerraformAWSAssumeRole: + description: "Role to assume to upload artifacts" + required: true + type: string + Bucket: + description: "Bucket to upload the artifacts to" + required: true + type: string + +jobs: + MakeMacPkg: + name: 'MakeMacPkg' + runs-on: macos-11 + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + path: cwa + fetch-depth: 0 + + - uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + path: test + + - name: Set up Go 1.x + uses: actions/setup-go@v4 + with: + go-version: ~1.22.2 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ inputs.TerraformAWSAssumeRole }} + aws-region: ${{ inputs.Region }} + + - name: Cache binaries + id: cached_binaries + uses: actions/cache@v3 + with: + key: "cached-binaries-${{ runner.os }}-${{ inputs.BucketKey }}" + path: go.mod + + - name: Cache pkg + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + uses: actions/cache@v3 + with: + path: | + ~/Library/Caches/go-build + ~/go/pkg/mod + key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + + - name: Build Binaries + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + working-directory: cwa + run: make amazon-cloudwatch-agent-darwin package-darwin + + - name: Copy binary + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + working-directory: cwa + run: | + echo cw agent version $(cat CWAGENT_VERSION) + cp -r build/bin/darwin/amd64/. /tmp/ + cp -r build/bin/darwin/arm64/. 
/tmp/arm64/ + cp build/bin/CWAGENT_VERSION /tmp/CWAGENT_VERSION + + - name: Create pkg dep folder and copy deps + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + working-directory: test + run: | + cp -r pkg/tools/. /tmp/ + cp -r pkg/tools/. /tmp/arm64/ + + - name: Build And Upload PKG + if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false + working-directory: /tmp/ + run: | + chmod +x create_pkg.sh + chmod +x arm64/create_pkg.sh + ./create_pkg.sh ${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }} "nosha" amd64 + cd arm64 + ./create_pkg.sh ${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }} "nosha" arm64 + + #GH actions set up gpg only works on ubuntu as of this commit date + GPGSignMacPackage: + name: 'GPGSignMacPackage' + runs-on: ubuntu-latest + needs: [ MakeMacPkg ] + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ inputs.TerraformAWSAssumeRole }} + aws-region: ${{ inputs.Region }} + + - name: Cache sig + id: cached_sig + uses: actions/cache@v3 + with: + key: "cached_sig_${{ github.sha }}" + path: go.mod + + - name: Download from s3 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false + run: | + mkdir -p packages/amd64 + mkdir packages/arm64 + aws s3 cp s3://${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }}/amd64/amazon-cloudwatch-agent.pkg ./packages/amd64/amazon-cloudwatch-agent.pkg + aws s3 cp s3://${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }}/arm64/amazon-cloudwatch-agent.pkg ./packages/arm64/amazon-cloudwatch-agent.pkg + - name: Import GPG Key + uses: crazy-max/ghaction-import-gpg@v5 + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.PASSPHRASE }} + + - name: Sign Build Files + run: for f in $(find packages/); do if [ ! 
-d $f ]; then echo "Signing file $f" && gpg --detach-sign $f ; fi ; done + + - name: Upload to s3 + if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false + run: | + aws s3 cp packages/amd64/amazon-cloudwatch-agent.pkg.sig s3://${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }}/amd64/amazon-cloudwatch-agent.pkg.sig + aws s3 cp packages/arm64/amazon-cloudwatch-agent.pkg.sig s3://${{ inputs.Bucket }}/${{ inputs.PackageBucketKey }}/arm64/amazon-cloudwatch-agent.pkg.sig diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 370c832990..9a4f68fa4f 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -8,13 +8,6 @@ env: on: workflow_dispatch: inputs: - ContainerRepositoryNameAndTag: - # e.g. "cwagent-integration-test:SHA" - # e.g. "cwa-release:latest" - # e.g. "cwa_nonprod:latest" - description: "ECR repo name and tag" - required: true - type: string BucketKey: # e.g. s3:///integration-test/binary/" # e.g. s3:///nonprod @@ -26,20 +19,21 @@ on: description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." required: true type: string - TargetRegion: - description: "Target region" + Region: + description: "Region to upload binaries" required: false type: string default: "us-west-2" - workflow_call: - inputs: - ContainerRepositoryNameAndTag: - # e.g. "cwagent-integration-test:SHA" - # e.g. "cwa-release:latest" - # e.g. "cwa_nonprod:latest" - description: "ECR repo name and tag" + TerraformAWSAssumeRole: + description: "Role to assume to upload artifacts" required: true type: string + Bucket: + description: "Bucket to upload the artifacts to" + required: true + type: string + workflow_call: + inputs: BucketKey: # e.g. s3:///integration-test/binary/" # e.g. s3:///nonprod @@ -51,11 +45,19 @@ on: description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." 
required: true type: string - TargetRegion: - description: "Target region" + Region: + description: "Region to upload binaries" required: false type: string default: "us-west-2" + TerraformAWSAssumeRole: + description: "Role to assume to upload artifacts" + required: true + type: string + Bucket: + description: "Bucket to upload the artifacts to" + required: true + type: string jobs: MakeBinary: @@ -73,7 +75,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v4 with: - go-version: ~1.21.1 + go-version: ~1.22.2 cache: false - name: Install rpm @@ -82,14 +84,14 @@ jobs: - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2 with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: ${{ inputs.TargetRegion }} + role-to-assume: ${{ inputs.TerraformAWSAssumeRole }} + aws-region: ${{ inputs.Region }} - name: Cache binaries id: cached_binaries uses: actions/cache@v3 with: - key: "cached_binaries_${{ github.sha }}" + key: "cached_binaries_${{ github.sha }}_${{ inputs.PackageBucketKey }}_${{ inputs.Bucket }}_${{ inputs.BucketKey }}" path: go.mod - name: Cache go @@ -122,258 +124,7 @@ jobs: if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false # Copy the RPM to .../amazon_linux/... because BETA customers expect it there. 
run: | - echo "BucketKey: ${{ secrets.S3_INTEGRATION_BUCKET}} ${{ inputs.BucketKey }}" - aws s3 cp build/bin s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }} --recursive - aws s3 cp build/bin/linux/amd64/amazon-cloudwatch-agent.rpm s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm - aws s3 cp build/bin/linux/arm64/amazon-cloudwatch-agent.rpm s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/amazon_linux/arm64/latest/amazon-cloudwatch-agent.rpm - - - name: Login ECR - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - id: login-ecr - uses: aws-actions/amazon-ecr-login@v1 - - - name: Set up Docker Buildx - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - uses: docker/setup-buildx-action@v1 - - - name: Set up QEMU - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - uses: docker/setup-qemu-action@v1 - - # Build dir is ignored in our .dockerignore thus need to copy to another dir. - - name: Copy Binary For Agent Image Build - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - run: cp -r build/bin/linux/* . - - - name: Build Cloudwatch Agent Image - uses: docker/build-push-action@v4 - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - with: - file: amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localdeb/Dockerfile - context: . 
- push: true - tags: | - ${{ steps.login-ecr.outputs.registry }}/${{ inputs.ContainerRepositoryNameAndTag }} - platforms: linux/amd64, linux/arm64 - - MakeMSIZip: - name: 'MakeMSIZip' - runs-on: ubuntu-latest - needs: [MakeBinary] - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.21.1 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - - name: Cache win zip - id: cached_win_zip - uses: actions/cache@v3 - with: - key: "cached_win_zip_${{ github.sha }}" - path: go.mod - - - name: Copy binary - if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false - run: | - aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }} . --recursive - - name: Unzip - if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false - run: | - sudo apt install unzip - unzip windows/amd64/amazon-cloudwatch-agent.zip -d windows-agent - - name: Create msi dep folder and copy deps - if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false - run: | - export version=$(cat CWAGENT_VERSION) - echo cw agent version $version - mkdir msi_dep - cp -r msi/tools/. msi_dep/ - cp -r windows-agent/amazon-cloudwatch-agent/. 
msi_dep/ - go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/amazon-cloudwatch-agent.wxs '' - go run msi/tools/msiversion/msiversionconverter.go $version msi_dep/manifest.json __VERSION__ - - - name: Zip - if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false - run: | - sudo apt install zip - zip buildMSI.zip msi_dep/* - - - name: Upload zip - if: contains(inputs.BucketKey, 'test') == false || steps.cached_win_zip.outputs.cache-hit == false - run: aws s3 cp buildMSI.zip s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/buildMSI.zip - - MakeMacPkg: - name: 'MakeMacPkg' - runs-on: macos-11 - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - path: cwa - fetch-depth: 0 - - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - path: test - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.21.1 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - - name: Cache binaries - id: cached_binaries - uses: actions/cache@v3 - with: - key: "cached-binaries-${{ runner.os }}-${{ inputs.BucketKey }}" - path: go.mod - - - name: Cache pkg - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - uses: actions/cache@v3 - with: - path: | - ~/Library/Caches/go-build - ~/go/pkg/mod - key: v1-go-pkg-mod-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - - name: Build Binaries - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - working-directory: cwa - run: make amazon-cloudwatch-agent-darwin package-darwin - - - name: Copy binary - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - working-directory: cwa - run: | - echo cw agent 
version $(cat CWAGENT_VERSION) - cp -r build/bin/darwin/amd64/. /tmp/ - cp -r build/bin/darwin/arm64/. /tmp/arm64/ - cp build/bin/CWAGENT_VERSION /tmp/CWAGENT_VERSION - - - name: Create pkg dep folder and copy deps - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - working-directory: test - run: | - cp -r pkg/tools/. /tmp/ - cp -r pkg/tools/. /tmp/arm64/ - - - name: Build And Upload PKG - if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false - working-directory: /tmp/ - run : | - chmod +x create_pkg.sh - chmod +x arm64/create_pkg.sh - ./create_pkg.sh ${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }} "nosha" amd64 - cd arm64 - ./create_pkg.sh ${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }} "nosha" arm64 - - BuildMSI: - name: 'BuildMSI' - runs-on: windows-latest - needs: [MakeMSIZip] - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - - name: Cache msi - id: cached_msi - uses: actions/cache@v3 - with: - key: "cached_msi_${{ github.sha }}" - path: go.mod - - # Using the env variable returns "" for bucket name thus use the secret - - name: Copy msi - if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false - run: aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.BucketKey }}/buildMSI.zip . 
- - - name: Create msi - if: contains(inputs.BucketKey, 'test') == false || steps.cached_msi.outputs.cache-hit == false - run : | - curl -OLS https://github.com/wixtoolset/wix3/releases/download/wix3111rtm/wix311.exe - .\wix311.exe /install /quiet /norestart - $wixToolsetBinPath = ";C:\Program Files (x86)\WiX Toolset v3.11\bin;" - $env:PATH = $env:PATH + $wixToolsetBinPath - Expand-Archive buildMSI.zip -Force - cd buildMSI/msi_dep - .\create_msi.ps1 "nosha" ${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }} - - #GH actions set up gpg only works on ubuntu as of this commit date - GPGSignMacAndWindowsPackage: - name: 'SignMacAndWindowsPackage' - runs-on: ubuntu-latest - needs: [BuildMSI, MakeMacPkg] - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - - - name: Cache sig - id: cached_sig - uses: actions/cache@v3 - with: - key: "cached_sig_${{ github.sha }}" - path: go.mod - - - name: Download from s3 - if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false - run: | - mkdir -p packages/amd64 - mkdir packages/arm64 - aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amazon-cloudwatch-agent.msi ./packages/amazon-cloudwatch-agent.msi - aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amd64/amazon-cloudwatch-agent.pkg ./packages/amd64/amazon-cloudwatch-agent.pkg - aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/arm64/amazon-cloudwatch-agent.pkg ./packages/arm64/amazon-cloudwatch-agent.pkg - - name: Import GPG Key - uses: crazy-max/ghaction-import-gpg@v5 - with: - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - passphrase: ${{ secrets.PASSPHRASE }} - - - name: Sign Build Files - run: for f in $(find 
packages/); do if [ ! -d $f ]; then echo "Signing file $f" && gpg --detach-sign $f ; fi ; done - - - name: Upload to s3 - if: contains(inputs.BucketKey, 'test') == false || steps.cached_sig.outputs.cache-hit == false - run: | - aws s3 cp packages/amazon-cloudwatch-agent.msi.sig s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amazon-cloudwatch-agent.msi.sig - aws s3 cp packages/amd64/amazon-cloudwatch-agent.pkg.sig s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/amd64/amazon-cloudwatch-agent.pkg.sig - aws s3 cp packages/arm64/amazon-cloudwatch-agent.pkg.sig s3://${{ secrets.S3_INTEGRATION_BUCKET }}/${{ inputs.PackageBucketKey }}/arm64/amazon-cloudwatch-agent.pkg.sig + echo "BucketKey: ${{ inputs.Bucket }} ${{ inputs.BucketKey }}" + aws s3 cp build/bin s3://${{ inputs.Bucket }}/${{ inputs.BucketKey }} --recursive + aws s3 cp build/bin/linux/amd64/amazon-cloudwatch-agent.rpm s3://${{ inputs.Bucket }}/${{ inputs.BucketKey }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm + aws s3 cp build/bin/linux/arm64/amazon-cloudwatch-agent.rpm s3://${{ inputs.Bucket }}/${{ inputs.BucketKey }}/amazon_linux/arm64/latest/amazon-cloudwatch-agent.rpm diff --git a/Makefile b/Makefile index 494952ece1..14ef2b41fb 100644 --- a/Makefile +++ b/Makefile @@ -25,8 +25,12 @@ WIN_BUILD = GOOS=windows GOARCH=amd64 go build -trimpath -buildmode=${CWAGENT_BU DARWIN_BUILD_AMD64 = CGO_ENABLED=1 GO111MODULE=on GOOS=darwin GOARCH=amd64 go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_amd64 DARWIN_BUILD_ARM64 = CGO_ENABLED=1 GO111MODULE=on GOOS=darwin GOARCH=arm64 go build -trimpath -ldflags="${LDFLAGS}" -o $(BUILD_SPACE)/bin/darwin_arm64 -IMAGE = amazon/cloudwatch-agent:$(VERSION) +IMAGE_REGISTRY = amazon +IMAGE_REPO = cloudwatch-agent +IMAGE_TAG = $(VERSION) +IMAGE = $(IMAGE_REGISTRY)/$(IMAGE_REPO):$(IMAGE_TAG) DOCKER_BUILD_FROM_SOURCE = docker build -t $(IMAGE) -f 
./amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile +DOCKER_WINDOWS_BUILD_FROM_SOURCE = docker build -t $(IMAGE) -f ./amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile.Windows CW_AGENT_IMPORT_PATH=github.com/aws/amazon-cloudwatch-agent ALL_SRC := $(shell find . -name '*.go' -type f | sort) @@ -106,35 +110,33 @@ build-for-docker-amd64: $(LINUX_AMD64_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent $(LINUX_AMD64_BUILD)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator +build-for-docker-windows-amd64: + $(WIN_BUILD)/amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent + $(WIN_BUILD)/start-amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent + $(WIN_BUILD)/config-translator.exe github.com/aws/amazon-cloudwatch-agent/cmd/config-translator + build-for-docker-arm64: $(LINUX_ARM64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent $(LINUX_ARM64_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent $(LINUX_ARM64_BUILD)/config-translator github.com/aws/amazon-cloudwatch-agent/cmd/config-translator -# this is because we docker ignore our build dir -# even if there is no dir rm -rf will not fail but if there already is a dir mkdir will -# for local registery you may only load a single platform -build-for-docker-fast: build-for-docker-amd64 build-for-docker-arm64 - rm -rf tmp - mkdir -p tmp/amd64 - mkdir -p tmp/arm64 - cp build/bin/linux_amd64/* tmp/amd64 - cp build/bin/linux_arm64/* tmp/arm64 - docker buildx build --platform linux/amd64,linux/arm64 . 
-f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t amazon-cloudwatch-agent - rm -rf tmp +docker-build: build-for-docker-amd64 build-for-docker-arm64 + docker buildx build --platform linux/amd64,linux/arm64 . -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t $(IMAGE) -build-for-docker-fast-amd64: build-for-docker-amd64 - rm -rf tmp - mkdir -p tmp/amd64 - cp build/bin/linux_amd64/* tmp/amd64 - docker buildx build --platform linux/amd64 . -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t amazon-cloudwatch-agent --load - rm -rf tmp +docker-build-amd64: build-for-docker-amd64 + docker buildx build --platform linux/amd64 . -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t $(IMAGE) --load + +docker-build-arm64: build-for-docker-arm64 + docker buildx build --platform linux/arm64 . -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t $(IMAGE) --load -build-for-docker-fast-arm64: build-for-docker-arm64 +docker-push: + docker push $(IMAGE) + +build-for-docker-fast-windows-amd64: build-for-docker-windows-amd64 rm -rf tmp - mkdir -p tmp/arm64 - cp build/bin/linux_arm64/* tmp/arm64 - docker buildx build --platform linux/arm64 . -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile -t amazon-cloudwatch-agent --load + mkdir -p tmp/windows_amd64 + cp build/bin/windows_amd64/* tmp/windows_amd64 + docker build --platform windows/amd64 -f amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.Windows . -t amazon-cloudwatch-agent rm -rf tmp install-goimports: @@ -195,7 +197,7 @@ lint: install-golangci-lint simple-lint ${LINTER} run ./... test: - CGO_ENABLED=0 go test -timeout 15m -coverprofile coverage.txt -failfast ./cfg/... ./cmd/... ./handlers/... ./internal/... ./logger/... ./logs/... ./metric/... ./receiver/... 
./plugins/... ./profiler/... ./tool/... ./translator/... + CGO_ENABLED=0 go test -timeout 15m -coverprofile coverage.txt -failfast ./... clean:: rm -rf release/ build/ @@ -326,3 +328,9 @@ dockerized-build: # Use vendor instead of proxy when building w/ vendor folder dockerized-build-vendor: $(DOCKER_BUILD_FROM_SOURCE) --build-arg GO111MODULE=off . + +.PHONY: dockerized-windows-build +dockerized-windows-build: + $(DOCKER_WINDOWS_BUILD_FROM_SOURCE) . + @echo Built image: + @echo $(IMAGE) \ No newline at end of file diff --git a/RELEASE_NOTES b/RELEASE_NOTES index c5fb1d9984..9e5cea56a9 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,83 @@ +======================================================================== +Amazon CloudWatch Agent 1.300040.0 (2024-05-21) +======================================================================== +Enhancements +* [Application Signals] Export emf logs to /aws/application-signals/data +* [Application Signals] Rename metric namespace to Application Signals +* [Application Signals] Change metric schema from HostedIn to Environment +* Trim AWS prefix for spans in xray exporter + +Bug fixes: +* Fix panic when using amazon-cloudwatch-agent-ctl -a cond-restart + +======================================================================== +Amazon CloudWatch Agent 1.300039.0 (2024-05-03) +======================================================================== +Features: +* [Metrics] Append Dimension Volume Id For Metrics + +Enhancements +* Upgrade OTEL Contrib To v0.98.0 + +======================================================================== +Amazon CloudWatch Agent 1.300037.1 (2024-04-26) +======================================================================== +Bug fixes: +* Fix nil referencing issue while decorating container insights metrics + +======================================================================== +Amazon CloudWatch Agent 1.300037.0 (2024-04-11) 
+======================================================================== +Features: +* [ContainerInsights] Add Elastic Fabric Adapter (EFA) observability with Kubernetes + +Enhancements: +* Retain CloudWatch Log Group when translating X-Ray segments + +Bug fixes: +* Clean up log pusher state file on startup +* Drop container/pod level Nvidia GPU metrics when there is no active workload + +======================================================================== +Amazon CloudWatch Agent 1.300036.0 (2024-04-05) +======================================================================== +Features: +* [ContainerInsights] Add AWS Trainium & Inferentia observability with Kubernetes + +======================================================================== +Amazon CloudWatch Agent 1.300035.0 (2024-03-18) +======================================================================== +Features: +* [ContainerInsights] Add Container Insights for Windows in Kubernetes. + +======================================================================== +Amazon CloudWatch Agent 1.300034.1 (2024-03-14) +======================================================================== +Bug fixes: +* [AppSignals] Explicitly set the default GC interval for metrics limiter + +Enhancements: +* [ContainerInsights] Add InstanceType attribute to GPU metrics + +======================================================================== +Amazon CloudWatch Agent 1.300034.0 (2024-03-04) +======================================================================== +Features: +* [ContainerInsights] Add NVIDIA GPU observability with Kubernetes + +Enhancements: +* [AppSignals] Improve Kubernetes support + +======================================================================== +Amazon CloudWatch Agent 1.300033.0 (2024-01-31) +======================================================================== + +Enhancements: +* [AppSignals] Log correlation +* [AppSignals] New Metric Rollup +* [AppSignals] Add metrics cardinality
control +* [AppSignals] Add application log group info + ======================================================================== Amazon CloudWatch Agent 1.300032.3 (2023-12-21) ======================================================================== diff --git a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile index 9452064151..a0585012c4 100644 --- a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile +++ b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile @@ -1,14 +1,23 @@ +# Build the binary +ARG CERT_IMAGE=ubuntu:latest + # Install cert and binaries -FROM ubuntu:latest +FROM $CERT_IMAGE as cert # Need to repeat the ARG after each FROM ARG TARGETARCH - RUN mkdir -p /opt/aws/amazon-cloudwatch-agent/etc RUN mkdir -p /opt/aws/amazon-cloudwatch-agent/var -RUN mkdir -p /opt/aws/amazon-cloudwatch-agent/bin -COPY tmp/${TARGETARCH} /opt/aws/amazon-cloudwatch-agent/bin -RUN apt-get update && apt-get install -y ca-certificates +RUN apt-get update && \ + apt-get install -y ca-certificates && \ + rm -rf /var/lib/apt/lists/* +COPY build/bin/linux_${TARGETARCH}/ /opt/aws/amazon-cloudwatch-agent/bin + +FROM scratch + +COPY --from=cert /tmp /tmp +COPY --from=cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=cert /opt/aws/amazon-cloudwatch-agent /opt/aws/amazon-cloudwatch-agent ENV RUN_IN_CONTAINER="True" ENTRYPOINT ["/opt/aws/amazon-cloudwatch-agent/bin/start-amazon-cloudwatch-agent"] diff --git a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.Windows b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.Windows new file mode 100644 index 0000000000..46a28d6362 --- /dev/null +++ b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.Windows @@ -0,0 
+1,22 @@ +# This Dockerfile can be used for building Windows Server 2019, 2022 Container images. + +# Build Container image from binary in local +ARG TAG=ltsc2022 +ARG TARGETARCH=amd64 +ARG CONTAINER_CLOUDWATCHAGENT_PATH=C:\\Program\ Files\\Amazon\\AmazonCloudWatchAgent\\ +ARG CONTAINER_CLOUDWATCHAGENT_DATA_PATH=C:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\ + +FROM mcr.microsoft.com/windows/servercore:$TAG as runtime + +ARG CONTAINER_CLOUDWATCHAGENT_PATH +ARG CONTAINER_CLOUDWATCHAGENT_DATA_PATH +ARG TARGETARCH + +RUN mkdir %CONTAINER_CLOUDWATCHAGENT_PATH% +RUN mkdir %CONTAINER_CLOUDWATCHAGENT_DATA_PATH% +WORKDIR $CONTAINER_CLOUDWATCHAGENT_PATH + +COPY "./tmp/windows_${TARGETARCH}" $CONTAINER_CLOUDWATCHAGENT_PATH + +ENV RUN_IN_CONTAINER="True" +CMD ["start-amazon-cloudwatch-agent.exe"] \ No newline at end of file diff --git a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.dockerignore b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localbin/Dockerfile.dockerignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows new file mode 100644 index 0000000000..9170b110dc --- /dev/null +++ b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/localmsi/Dockerfile.Windows @@ -0,0 +1,32 @@ +# This Dockerfile can be used for building Windows Server 2019, 2022 Container images. 
+ +# Build Container image from msi installer +ARG IMAGE_TAG=ltsc2022 +ARG BUILD_IMAGE=mcr.microsoft.com/windows/servercore:$IMAGE_TAG +ARG TARGETARCH=amd64 +ARG CONTAINER_CLOUDWATCHAGENT_PATH=C:\\Program\ Files\\Amazon\\AmazonCloudWatchAgent\\ +ARG CONTAINER_CLOUDWATCHAGENT_DATA_PATH=C:\\ProgramData\\Amazon\\AmazonCloudWatchAgent\\ + +FROM $BUILD_IMAGE as builder + +ARG TARGETARCH +ARG CONTAINER_CLOUDWATCHAGENT_PATH + +COPY ${TARGETARCH}/amazon-cloudwatch-agent.msi C:\\tmp\\amazon-cloudwatch-agent.msi +RUN msiexec /i C:\\tmp\\amazon-cloudwatch-agent.msi + +# Build target CW agent container image for Windows +FROM mcr.microsoft.com/windows/nanoserver:$IMAGE_TAG + +ARG CONTAINER_CLOUDWATCHAGENT_PATH +ARG CONTAINER_CLOUDWATCHAGENT_DATA_PATH +ARG TARGETARCH + +RUN mkdir %CONTAINER_CLOUDWATCHAGENT_PATH% +WORKDIR $CONTAINER_CLOUDWATCHAGENT_PATH + +COPY --from=builder $CONTAINER_CLOUDWATCHAGENT_PATH $CONTAINER_CLOUDWATCHAGENT_PATH +COPY --from=builder $CONTAINER_CLOUDWATCHAGENT_PATH $CONTAINER_CLOUDWATCHAGENT_DATA_PATH + +ENV RUN_IN_CONTAINER="True" +CMD ["start-amazon-cloudwatch-agent.exe"] diff --git a/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile.Windows b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile.Windows new file mode 100644 index 0000000000..3179cb94a0 --- /dev/null +++ b/amazon-cloudwatch-container-insights/cloudwatch-agent-dockerfile/source/Dockerfile.Windows @@ -0,0 +1,45 @@ +# This Dockerfile can be used for building Windows Server 2019, 2022 Container images. 
+ +ARG TAG=ltsc2022 +ARG TARGETARCH=amd64 +ARG CONTAINER_CLOUDWATCHAGENT_PATH=C:\\Program\ Files\\Amazon\\AmazonCloudWatchAgent\\ +ARG GO_IMAGE=golang:latest + +# Build CW agent inside Windows golang container +FROM $GO_IMAGE as builder + +ARG TARGETARCH + +RUN New-Item -ItemType Directory -Path "C:\go\src\github.com\aws\amazon-cloudwatch-agent" -Force +WORKDIR "C:\go\src\github.com\aws\amazon-cloudwatch-agent" + +ENV GOPROXY=direct +ARG GO111MODULE="on" +ENV GO111MODULE=${GO111MODULE} + +COPY go.mod "C:\go\src\github.com\aws\amazon-cloudwatch-agent" +COPY go.sum "C:\go\src\github.com\aws\amazon-cloudwatch-agent" +RUN go mod download -x + +COPY . "C:\go\src\github.com\aws\amazon-cloudwatch-agent" + +# Install chocolatey and make +RUN Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) +RUN choco install make + +# Run make and build CW agent inside container +RUN make build-for-docker-windows-${env:TARGETARCH} + +# Build target CW agent container image for Windows +FROM mcr.microsoft.com/windows/nanoserver:$TAG + +ARG CONTAINER_CLOUDWATCHAGENT_PATH +ARG TARGETARCH + +RUN mkdir %CONTAINER_CLOUDWATCHAGENT_PATH% +WORKDIR $CONTAINER_CLOUDWATCHAGENT_PATH + +COPY --from=builder C:\\go\\src\\github.com\\aws\\amazon-cloudwatch-agent\\build\\bin\\windows_$TARGETARCH\\ $CONTAINER_CLOUDWATCHAGENT_PATH + +ENV RUN_IN_CONTAINER="True" +CMD ["start-amazon-cloudwatch-agent.exe"] \ No newline at end of file diff --git a/cfg/aws/credentials.go b/cfg/aws/credentials.go index 34ca454904..c0867ba258 100644 --- a/cfg/aws/credentials.go +++ b/cfg/aws/credentials.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + 
"github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" ) const ( @@ -116,7 +116,7 @@ func getSession(config *aws.Config) *session.Session { if len(found) > 0 { log.Printf("W! Unused shared config file(s) found: %v. If you would like to use them, "+ "please update your common-config.toml.", found) - provider.GetFlagsStats().SetFlag(provider.FlagSharedConfigFallback) + agent.UsageFlags().Set(agent.FlagSharedConfigFallback) } } return ses diff --git a/cfg/envconfig/envconfig.go b/cfg/envconfig/envconfig.go index 67e85a8021..a0cd44327c 100644 --- a/cfg/envconfig/envconfig.go +++ b/cfg/envconfig/envconfig.go @@ -5,29 +5,31 @@ package envconfig import ( "os" + "runtime" "strconv" "sync" ) const ( //the following are the names of environment variables - HTTP_PROXY = "HTTP_PROXY" - HTTPS_PROXY = "HTTPS_PROXY" - NO_PROXY = "NO_PROXY" - AWS_CA_BUNDLE = "AWS_CA_BUNDLE" - AWS_SDK_LOG_LEVEL = "AWS_SDK_LOG_LEVEL" - CWAGENT_USER_AGENT = "CWAGENT_USER_AGENT" - CWAGENT_LOG_LEVEL = "CWAGENT_LOG_LEVEL" - CWAGENT_USAGE_DATA = "CWAGENT_USAGE_DATA" - IMDS_NUMBER_RETRY = "IMDS_NUMBER_RETRY" - RunInContainer = "RUN_IN_CONTAINER" - RunInAWS = "RUN_IN_AWS" - RunWithIRSA = "RUN_WITH_IRSA" - UseDefaultConfig = "USE_DEFAULT_CONFIG" - HostName = "HOST_NAME" - PodName = "POD_NAME" - HostIP = "HOST_IP" - CWConfigContent = "CW_CONFIG_CONTENT" + HTTP_PROXY = "HTTP_PROXY" + HTTPS_PROXY = "HTTPS_PROXY" + NO_PROXY = "NO_PROXY" + AWS_CA_BUNDLE = "AWS_CA_BUNDLE" + AWS_SDK_LOG_LEVEL = "AWS_SDK_LOG_LEVEL" + CWAGENT_USER_AGENT = "CWAGENT_USER_AGENT" + CWAGENT_LOG_LEVEL = "CWAGENT_LOG_LEVEL" + CWAGENT_USAGE_DATA = "CWAGENT_USAGE_DATA" + IMDS_NUMBER_RETRY = "IMDS_NUMBER_RETRY" + RunInContainer = "RUN_IN_CONTAINER" + RunAsHostProcessContainer = "RUN_AS_HOST_PROCESS_CONTAINER" + RunInAWS = "RUN_IN_AWS" + RunWithIRSA = "RUN_WITH_IRSA" + UseDefaultConfig = "USE_DEFAULT_CONFIG" + HostName = "HOST_NAME" + PodName = "POD_NAME" + HostIP = "HOST_IP" + CWConfigContent = "CW_CONFIG_CONTENT" 
) const ( @@ -57,3 +59,10 @@ func IsUsageDataEnabled() bool { func IsRunningInContainer() bool { return os.Getenv(RunInContainer) == TrueValue } + +func IsWindowsHostProcessContainer() bool { + if runtime.GOOS == "windows" && os.Getenv(RunInContainer) == TrueValue && os.Getenv(RunAsHostProcessContainer) == TrueValue { + return true + } + return false +} diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index ca7a6fb709..2afac53173 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -30,6 +30,7 @@ import ( "github.com/influxdata/wlog" "github.com/kardianos/service" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/otelcol" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" @@ -90,6 +91,7 @@ var fServiceName = flag.String("service-name", "telegraf", "service name (window var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var fSetEnv = flag.String("setenv", "", "set an env in the configuration file in the format of KEY=VALUE") +var fStartUpErrorFile = flag.String("startup-error-file", "", "file to touch if agent can't start") var stop chan struct{} @@ -175,6 +177,14 @@ func reloadLoop( err := runAgent(ctx, inputFilters, outputFilters) if err != nil && err != context.Canceled { + if *fStartUpErrorFile != "" { + f, err := os.OpenFile(*fStartUpErrorFile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Printf("E! Unable to create errorFile: %s", err) + } else { + _ = f.Close() + } + } log.Fatalf("E! 
[telegraf] Error running agent: %v", err) } } @@ -336,12 +346,13 @@ func runAgent(ctx context.Context, params := getCollectorParams(factories, provider, writer) + _ = featuregate.GlobalRegistry().Set("exporter.xray.allowDot", true) cmd := otelcol.NewCommand(params) // Noticed that args of parent process get passed here to otel collector which causes failures complaining about // unrecognized args. So below change overwrites the args. Need to investigate this further as I dont think the config // path below here is actually used and it still respects what was set in the settings above. - e := []string{"--config=" + yamlConfigPath} + e := []string{"--config=" + yamlConfigPath + " --feature-gates=exporter.xray.allowDot"} cmd.SetArgs(e) return cmd.Execute() diff --git a/cmd/config-translator/translator.go b/cmd/config-translator/translator.go index da9124803d..2d69a2c03c 100644 --- a/cmd/config-translator/translator.go +++ b/cmd/config-translator/translator.go @@ -61,7 +61,10 @@ func initFlags() { } translatorUtil.SetProxyEnv(ctx.Proxy()) translatorUtil.SetSSLEnv(ctx.SSL()) - ctx.SetMode(translatorUtil.DetectAgentMode(*inputMode)) + + mode := translatorUtil.DetectAgentMode(*inputMode) + ctx.SetMode(mode) + ctx.SetKubernetesMode(translatorUtil.DetectKubernetesMode(mode)) } /** diff --git a/cmd/start-amazon-cloudwatch-agent/path_windows.go b/cmd/start-amazon-cloudwatch-agent/path_windows.go index 19c01a2ef7..8337a6f0ee 100644 --- a/cmd/start-amazon-cloudwatch-agent/path_windows.go +++ b/cmd/start-amazon-cloudwatch-agent/path_windows.go @@ -10,25 +10,44 @@ import ( "fmt" "io" "log" + "os" "os/exec" "github.com/aws/amazon-cloudwatch-agent/tool/paths" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) func startAgent(writer io.WriteCloser) error { - if err := writer.Close(); err != nil { - log.Printf("E! 
Cannot close the log file, ERROR is %v \n", err) + + if os.Getenv(config.RUN_IN_CONTAINER) != config.RUN_IN_CONTAINER_TRUE { + if err := writer.Close(); err != nil { + log.Printf("E! Cannot close the log file, ERROR is %v \n", err) + return err + } + cmd := exec.Command( + paths.AgentBinaryPath, + "-config", paths.TomlConfigPath, + "-envconfig", paths.EnvConfigPath, + "-otelconfig", paths.YamlConfigPath, + ) + stdoutStderr, err := cmd.CombinedOutput() + // log file is closed, so use fmt here + fmt.Printf("%s \n", stdoutStderr) + return err + } else { + cmd := exec.Command( + paths.AgentBinaryPath, + "-config", paths.TomlConfigPath, + "-envconfig", paths.EnvConfigPath, + "-otelconfig", paths.YamlConfigPath, + "-console", "true", + ) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + fmt.Printf("%s \n", err) return err } - cmd := exec.Command( - paths.AgentBinaryPath, - "-config", paths.TomlConfigPath, - "-envconfig", paths.EnvConfigPath, - "-otelconfig", paths.YamlConfigPath, - ) - stdoutStderr, err := cmd.CombinedOutput() - // log file is closed, so use fmt here - fmt.Printf("%s \n", stdoutStderr) - return err } diff --git a/extension/agenthealth/factory.go b/extension/agenthealth/factory.go index fe60efec6f..e8e97587a5 100644 --- a/extension/agenthealth/factory.go +++ b/extension/agenthealth/factory.go @@ -12,8 +12,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" ) -const ( - TypeStr = "agenthealth" +var ( + TypeStr, _ = component.NewType("agenthealth") ) func NewFactory() extension.Factory { diff --git a/extension/agenthealth/handler/stats/agent/agent.go b/extension/agenthealth/handler/stats/agent/agent.go index 6eaf97d4bd..1c3b0dc348 100644 --- a/extension/agenthealth/handler/stats/agent/agent.go +++ b/extension/agenthealth/handler/stats/agent/agent.go @@ -111,4 +111,6 @@ func NewOperationsFilter(operations ...string) OperationsFilter { type StatsConfig struct { // 
Operations are the allowed operation names to gather stats for. Operations []string `mapstructure:"operations,omitempty"` + // UsageFlags are the usage flags to set on start up. + UsageFlags map[Flag]any `mapstructure:"usage_flags,omitempty"` } diff --git a/extension/agenthealth/handler/stats/agent/flag.go b/extension/agenthealth/handler/stats/agent/flag.go new file mode 100644 index 0000000000..65b77632e5 --- /dev/null +++ b/extension/agenthealth/handler/stats/agent/flag.go @@ -0,0 +1,188 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package agent + +import ( + "encoding" + "errors" + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +var ( + errUnsupportedFlag = errors.New("unsupported usage flag") +) + +const ( + FlagIMDSFallbackSuccess Flag = iota + FlagSharedConfigFallback + FlagAppSignal + FlagEnhancedContainerInsights + FlagRunningInContainer + FlagMode + FlagRegionType + + flagIMDSFallbackSuccessStr = "imds_fallback_success" + flagSharedConfigFallbackStr = "shared_config_fallback" + flagAppSignalsStr = "application_signals" + flagEnhancedContainerInsightsStr = "enhanced_container_insights" + flagRunningInContainerStr = "running_in_container" + flagModeStr = "mode" + flagRegionTypeStr = "region_type" +) + +type Flag int + +var _ encoding.TextMarshaler = (*Flag)(nil) +var _ encoding.TextUnmarshaler = (*Flag)(nil) + +func (f Flag) String() string { + switch f { + case FlagAppSignal: + return flagAppSignalsStr + case FlagEnhancedContainerInsights: + return flagEnhancedContainerInsightsStr + case FlagIMDSFallbackSuccess: + return flagIMDSFallbackSuccessStr + case FlagMode: + return flagModeStr + case FlagRegionType: + return flagRegionTypeStr + case FlagRunningInContainer: + return flagRunningInContainerStr + case FlagSharedConfigFallback: + return flagSharedConfigFallbackStr + } + return "" +} + +func (f Flag) MarshalText() (text []byte, err error) { + s := f.String() + if s == "" { + return 
nil, fmt.Errorf("%w: %[2]T(%[2]d)", errUnsupportedFlag, f) + } + return []byte(s), nil +} + +func (f *Flag) UnmarshalText(text []byte) error { + switch s := string(text); s { + case flagAppSignalsStr: + *f = FlagAppSignal + case flagEnhancedContainerInsightsStr: + *f = FlagEnhancedContainerInsights + case flagIMDSFallbackSuccessStr: + *f = FlagIMDSFallbackSuccess + case flagModeStr: + *f = FlagMode + case flagRegionTypeStr: + *f = FlagRegionType + case flagRunningInContainerStr: + *f = FlagRunningInContainer + case flagSharedConfigFallbackStr: + *f = FlagSharedConfigFallback + default: + return fmt.Errorf("%w: %s", errUnsupportedFlag, s) + } + return nil +} + +var ( + flagSingleton FlagSet + flagOnce sync.Once +) + +// FlagSet is a getter/setter for flag/value pairs. Once a flag key is set, its value is immutable. +type FlagSet interface { + // IsSet returns if the flag is present in the backing map. + IsSet(flag Flag) bool + // GetString if the value stored with the flag is a string. If not, returns nil. + GetString(flag Flag) *string + // Set adds the Flag with an unused value. + Set(flag Flag) + // SetValue adds the Flag with a value. + SetValue(flag Flag, value any) + // SetValues adds each Flag/value pair. + SetValues(flags map[Flag]any) + // OnChange registers a callback that triggers on flag sets. 
+ OnChange(callback func()) +} + +type flagSet struct { + m sync.Map + mu sync.RWMutex + callbacks []func() +} + +var _ FlagSet = (*flagSet)(nil) + +func (p *flagSet) IsSet(flag Flag) bool { + _, ok := p.m.Load(flag) + return ok +} + +func (p *flagSet) GetString(flag Flag) *string { + value, ok := p.m.Load(flag) + if !ok { + return nil + } + var str string + str, ok = value.(string) + if !ok || str == "" { + return nil + } + return aws.String(str) +} + +func (p *flagSet) Set(flag Flag) { + p.SetValue(flag, 1) +} + +func (p *flagSet) SetValue(flag Flag, value any) { + if p.setWithValue(flag, value) { + p.notify() + } +} + +func (p *flagSet) SetValues(m map[Flag]any) { + var changed bool + for flag, value := range m { + if p.setWithValue(flag, value) { + changed = true + } + } + if changed { + p.notify() + } +} + +func (p *flagSet) setWithValue(flag Flag, value any) bool { + if !p.IsSet(flag) { + p.m.Store(flag, value) + return true + } + return false +} + +func (p *flagSet) OnChange(f func()) { + p.mu.Lock() + defer p.mu.Unlock() + p.callbacks = append(p.callbacks, f) +} + +func (p *flagSet) notify() { + p.mu.RLock() + defer p.mu.RUnlock() + for _, callback := range p.callbacks { + callback() + } +} + +func UsageFlags() FlagSet { + flagOnce.Do(func() { + flagSingleton = &flagSet{} + }) + return flagSingleton +} diff --git a/extension/agenthealth/handler/stats/agent/flag_test.go b/extension/agenthealth/handler/stats/agent/flag_test.go new file mode 100644 index 0000000000..7936999a5d --- /dev/null +++ b/extension/agenthealth/handler/stats/agent/flag_test.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agent + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFlagSet(t *testing.T) { + fs := &flagSet{} + var notifyCount int + fs.OnChange(func() { + notifyCount++ + }) + assert.False(t, fs.IsSet(FlagIMDSFallbackSuccess)) + assert.Nil(t, fs.GetString(FlagIMDSFallbackSuccess)) + fs.Set(FlagIMDSFallbackSuccess) + assert.True(t, fs.IsSet(FlagIMDSFallbackSuccess)) + assert.Nil(t, fs.GetString(FlagIMDSFallbackSuccess)) + assert.Equal(t, 1, notifyCount) + // already set, so ignored + fs.SetValue(FlagIMDSFallbackSuccess, "ignores this") + assert.Nil(t, fs.GetString(FlagIMDSFallbackSuccess)) + assert.Equal(t, 1, notifyCount) + fs.SetValues(map[Flag]any{ + FlagMode: "test/mode", + FlagRegionType: "test/region-type", + }) + assert.True(t, fs.IsSet(FlagMode)) + assert.True(t, fs.IsSet(FlagRegionType)) + got := fs.GetString(FlagMode) + assert.NotNil(t, got) + assert.Equal(t, "test/mode", *got) + got = fs.GetString(FlagRegionType) + assert.NotNil(t, got) + assert.Equal(t, "test/region-type", *got) + assert.Equal(t, 2, notifyCount) + fs.SetValues(map[Flag]any{ + FlagRegionType: "other", + }) + assert.NotNil(t, got) + assert.Equal(t, "test/region-type", *got) + assert.Equal(t, 2, notifyCount) + fs.SetValues(map[Flag]any{ + FlagMode: "other/mode", + FlagRunningInContainer: true, + }) + got = fs.GetString(FlagMode) + assert.NotNil(t, got) + assert.Equal(t, "test/mode", *got) + assert.True(t, fs.IsSet(FlagRunningInContainer)) + assert.Equal(t, 3, notifyCount) +} + +func TestFlag(t *testing.T) { + testCases := []struct { + flag Flag + str string + }{ + {flag: FlagAppSignal, str: flagAppSignalsStr}, + {flag: FlagEnhancedContainerInsights, str: flagEnhancedContainerInsightsStr}, + {flag: FlagIMDSFallbackSuccess, str: flagIMDSFallbackSuccessStr}, + {flag: FlagMode, str: flagModeStr}, + {flag: FlagRegionType, str: flagRegionTypeStr}, + {flag: FlagRunningInContainer, str: flagRunningInContainerStr}, + {flag: 
FlagSharedConfigFallback, str: flagSharedConfigFallbackStr}, + } + for _, testCase := range testCases { + flag := testCase.flag + got, err := flag.MarshalText() + assert.NoError(t, err) + assert.EqualValues(t, testCase.str, got) + assert.NoError(t, flag.UnmarshalText(got)) + assert.Equal(t, flag, testCase.flag) + } +} + +func TestInvalidFlag(t *testing.T) { + f := Flag(-1) + got, err := f.MarshalText() + assert.Error(t, err) + assert.ErrorIs(t, err, errUnsupportedFlag) + assert.Nil(t, got) + err = f.UnmarshalText([]byte("Flag(-1)")) + assert.Error(t, err) + assert.ErrorIs(t, err, errUnsupportedFlag) +} diff --git a/extension/agenthealth/handler/stats/handler.go b/extension/agenthealth/handler/stats/handler.go index f5fe991125..7e12f12b5c 100644 --- a/extension/agenthealth/handler/stats/handler.go +++ b/extension/agenthealth/handler/stats/handler.go @@ -25,6 +25,7 @@ func NewHandlers(logger *zap.Logger, cfg agent.StatsConfig) ([]awsmiddleware.Req filter := agent.NewOperationsFilter(cfg.Operations...) 
clientStats := client.NewHandler(filter) stats := newStatsHandler(logger, filter, []agent.StatsProvider{clientStats, provider.GetProcessStats(), provider.GetFlagsStats()}) + agent.UsageFlags().SetValues(cfg.UsageFlags) return []awsmiddleware.RequestHandler{stats, clientStats}, []awsmiddleware.ResponseHandler{clientStats} } diff --git a/extension/agenthealth/handler/stats/provider/flag.go b/extension/agenthealth/handler/stats/provider/flag.go index de684e1f86..6ea9c7649d 100644 --- a/extension/agenthealth/handler/stats/provider/flag.go +++ b/extension/agenthealth/handler/stats/provider/flag.go @@ -17,99 +17,61 @@ const ( flagGetInterval = 5 * time.Minute ) -type Flag int - -const ( - FlagIMDSFallbackSucceed Flag = iota - FlagSharedConfigFallback - FlagAppSignal - FlagEnhancedContainerInsights - FlagRunningInContainer - FlagMode - FlagRegionType -) - var ( - flagSingleton FlagStats + flagSingleton *flagStats flagOnce sync.Once ) -type FlagStats interface { - agent.StatsProvider - SetFlag(flag Flag) - SetFlagWithValue(flag Flag, value string) -} - type flagStats struct { *intervalStats - flags sync.Map + flagSet agent.FlagSet } -var _ FlagStats = (*flagStats)(nil) - func (p *flagStats) update() { p.stats.Store(agent.Stats{ - ImdsFallbackSucceed: p.getIntFlag(FlagIMDSFallbackSucceed, false), - SharedConfigFallback: p.getIntFlag(FlagSharedConfigFallback, false), - AppSignals: p.getIntFlag(FlagAppSignal, false), - EnhancedContainerInsights: p.getIntFlag(FlagEnhancedContainerInsights, false), - RunningInContainer: p.getIntFlag(FlagRunningInContainer, true), - Mode: p.getStringFlag(FlagMode), - RegionType: p.getStringFlag(FlagRegionType), + ImdsFallbackSucceed: boolToSparseInt(p.flagSet.IsSet(agent.FlagIMDSFallbackSuccess)), + SharedConfigFallback: boolToSparseInt(p.flagSet.IsSet(agent.FlagSharedConfigFallback)), + AppSignals: boolToSparseInt(p.flagSet.IsSet(agent.FlagAppSignal)), + EnhancedContainerInsights: 
boolToSparseInt(p.flagSet.IsSet(agent.FlagEnhancedContainerInsights)), + RunningInContainer: boolToInt(p.flagSet.IsSet(agent.FlagRunningInContainer)), + Mode: p.flagSet.GetString(agent.FlagMode), + RegionType: p.flagSet.GetString(agent.FlagRegionType), }) } -func (p *flagStats) getIntFlag(flag Flag, missingAsZero bool) *int { - if _, ok := p.flags.Load(flag); ok { - return aws.Int(1) +func boolToInt(value bool) *int { + result := boolToSparseInt(value) + if result != nil { + return result } - if missingAsZero { - return aws.Int(0) - } - return nil + return aws.Int(0) } -func (p *flagStats) getStringFlag(flag Flag) *string { - value, ok := p.flags.Load(flag) - if !ok { - return nil - } - var str string - str, ok = value.(string) - if !ok { - return nil - } - return aws.String(str) -} - -func (p *flagStats) SetFlag(flag Flag) { - if _, ok := p.flags.Load(flag); !ok { - p.flags.Store(flag, true) - p.update() - } -} - -func (p *flagStats) SetFlagWithValue(flag Flag, value string) { - if _, ok := p.flags.Load(flag); !ok { - p.flags.Store(flag, value) - p.update() +func boolToSparseInt(value bool) *int { + if value { + return aws.Int(1) } + return nil } -func newFlagStats(interval time.Duration) *flagStats { +func newFlagStats(flagSet agent.FlagSet, interval time.Duration) *flagStats { stats := &flagStats{ + flagSet: flagSet, intervalStats: newIntervalStats(interval), } + stats.flagSet.OnChange(stats.update) if envconfig.IsRunningInContainer() { - stats.SetFlag(FlagRunningInContainer) + stats.flagSet.Set(agent.FlagRunningInContainer) + } else { + stats.update() } return stats } -func GetFlagsStats() FlagStats { +func GetFlagsStats() agent.StatsProvider { flagOnce.Do(func() { - flagSingleton = newFlagStats(flagGetInterval) + flagSingleton = newFlagStats(agent.UsageFlags(), flagGetInterval) }) return flagSingleton } diff --git a/extension/agenthealth/handler/stats/provider/flag_test.go b/extension/agenthealth/handler/stats/provider/flag_test.go index cbc42c094a..b1b0360200 
100644 --- a/extension/agenthealth/handler/stats/provider/flag_test.go +++ b/extension/agenthealth/handler/stats/provider/flag_test.go @@ -10,28 +10,30 @@ import ( "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" ) func TestFlagStats(t *testing.T) { + t.Skip("stat provider tests are flaky. disable until fix is available") t.Setenv(envconfig.RunInContainer, envconfig.TrueValue) - provider := newFlagStats(time.Microsecond) - got := provider.getStats() + fs := newFlagStats(agent.UsageFlags(), time.Microsecond) + got := fs.getStats() assert.Nil(t, got.ImdsFallbackSucceed) assert.Nil(t, got.SharedConfigFallback) assert.NotNil(t, got.RunningInContainer) assert.Equal(t, 1, *got.RunningInContainer) - provider.SetFlag(FlagIMDSFallbackSucceed) + fs.flagSet.Set(agent.FlagIMDSFallbackSuccess) assert.Nil(t, got.ImdsFallbackSucceed) - got = provider.getStats() + got = fs.getStats() assert.NotNil(t, got.ImdsFallbackSucceed) assert.Equal(t, 1, *got.ImdsFallbackSucceed) assert.Nil(t, got.SharedConfigFallback) - provider.SetFlag(FlagSharedConfigFallback) - got = provider.getStats() + fs.flagSet.Set(agent.FlagSharedConfigFallback) + got = fs.getStats() assert.NotNil(t, got.SharedConfigFallback) assert.Equal(t, 1, *got.SharedConfigFallback) - provider.SetFlagWithValue(FlagMode, "test") - got = provider.getStats() + fs.flagSet.SetValue(agent.FlagMode, "test") + got = fs.getStats() assert.NotNil(t, got.Mode) assert.Equal(t, "test", *got.Mode) } diff --git a/extension/agenthealth/handler/stats/provider/interval_test.go b/extension/agenthealth/handler/stats/provider/interval_test.go index d56c23b8ae..0baee1c163 100644 --- a/extension/agenthealth/handler/stats/provider/interval_test.go +++ b/extension/agenthealth/handler/stats/provider/interval_test.go @@ -14,6 +14,7 @@ import ( ) func TestIntervalStats(t *testing.T) { + t.Skip("stat provider tests are flaky. 
disable until fix is available") s := newIntervalStats(time.Millisecond) s.stats.Store(agent.Stats{ ThreadCount: aws.Int32(2), diff --git a/extension/agenthealth/handler/stats/provider/process_test.go b/extension/agenthealth/handler/stats/provider/process_test.go index ea9a56b602..19fac625fb 100644 --- a/extension/agenthealth/handler/stats/provider/process_test.go +++ b/extension/agenthealth/handler/stats/provider/process_test.go @@ -59,6 +59,7 @@ func (m *mockProcessMetrics) NumThreads() (int32, error) { } func TestProcessStats(t *testing.T) { + t.Skip("stat provider tests are flaky. disable until fix is available") testErr := errors.New("test error") mock := &mockProcessMetrics{} provider := newProcessStats(mock, time.Millisecond) diff --git a/extension/agenthealth/handler/useragent/useragent.go b/extension/agenthealth/handler/useragent/useragent.go index 4af3be0f11..54ae527c66 100644 --- a/extension/agenthealth/handler/useragent/useragent.go +++ b/extension/agenthealth/handler/useragent/useragent.go @@ -18,7 +18,7 @@ import ( "golang.org/x/exp/maps" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/internal/version" "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" @@ -27,7 +27,7 @@ import ( const ( flagRunAsUser = "run_as_user" flagContainerInsights = "container_insights" - flagAppSignals = "app_signals" + flagAppSignals = "application_signals" flagEnhancedContainerInsights = "enhanced_container_insights" separator = " " @@ -79,23 +79,23 @@ func (ua *userAgent) SetComponents(otelCfg *otelcol.Config, telegrafCfg *telegra for _, pipeline := range otelCfg.Service.Pipelines { for _, receiver := range pipeline.Receivers { // trim the adapter prefix from adapted Telegraf plugins 
- name := strings.TrimPrefix(string(receiver.Type()), adapter.TelegrafPrefix) + name := strings.TrimPrefix(receiver.Type().String(), adapter.TelegrafPrefix) ua.inputs.Add(name) } for _, processor := range pipeline.Processors { - ua.processors.Add(string(processor.Type())) + ua.processors.Add(processor.Type().String()) } for _, exporter := range pipeline.Exporters { - ua.outputs.Add(string(exporter.Type())) - if exporter.Type() == "awsemf" { + ua.outputs.Add(exporter.Type().String()) + if exporter.Type().String() == "awsemf" { cfg := otelCfg.Exporters[exporter].(*awsemfexporter.Config) if cfg.IsAppSignalsEnabled() { ua.outputs.Add(flagAppSignals) - provider.GetFlagsStats().SetFlag(provider.FlagAppSignal) + agent.UsageFlags().Set(agent.FlagAppSignal) } if cfg.IsEnhancedContainerInsights() { ua.outputs.Add(flagEnhancedContainerInsights) - provider.GetFlagsStats().SetFlag(provider.FlagEnhancedContainerInsights) + agent.UsageFlags().Set(agent.FlagEnhancedContainerInsights) } } } diff --git a/extension/agenthealth/handler/useragent/useragent_test.go b/extension/agenthealth/handler/useragent/useragent_test.go index 2ce645e03f..dd203e825f 100644 --- a/extension/agenthealth/handler/useragent/useragent_test.go +++ b/extension/agenthealth/handler/useragent/useragent_test.go @@ -22,20 +22,26 @@ import ( ) func TestSetComponents(t *testing.T) { + metricsType, _ := component.NewType("metrics") + telegrafCPUType, _ := component.NewType(adapter.TelegrafPrefix + "cpu") + prometheusType, _ := component.NewType("prometheus") + batchType, _ := component.NewType("batch") + filterType, _ := component.NewType("filter") + cloudwatchType, _ := component.NewType("cloudwatch") otelCfg := &otelcol.Config{ Service: service.Config{ Pipelines: map[component.ID]*pipelines.PipelineConfig{ - component.NewID("metrics"): { + component.NewID(metricsType): { Receivers: []component.ID{ - component.NewID(adapter.TelegrafPrefix + "cpu"), - component.NewID("prometheus"), + component.NewID(telegrafCPUType), 
+ component.NewID(prometheusType), }, Processors: []component.ID{ - component.NewID("batch"), - component.NewID("filter"), + component.NewID(batchType), + component.NewID(filterType), }, Exporters: []component.ID{ - component.NewID("cloudwatch"), + component.NewID(cloudwatchType), }, }, }, @@ -101,21 +107,24 @@ func TestAlternateUserAgent(t *testing.T) { } func TestEmf(t *testing.T) { + metricsType, _ := component.NewType("metrics") + nopType, _ := component.NewType("nop") + awsEMFType, _ := component.NewType("awsemf") otelCfg := &otelcol.Config{ Service: service.Config{ Pipelines: map[component.ID]*pipelines.PipelineConfig{ - component.NewID("metrics"): { + component.NewID(metricsType): { Receivers: []component.ID{ - component.NewID("nop"), + component.NewID(nopType), }, Exporters: []component.ID{ - component.NewID("awsemf"), + component.NewID(awsEMFType), }, }, }, }, Exporters: map[component.ID]component.Config{ - component.NewID("awsemf"): &awsemfexporter.Config{Namespace: "AppSignals", LogGroupName: "/aws/appsignals/log/group"}, + component.NewID(awsEMFType): &awsemfexporter.Config{Namespace: "ApplicationSignals", LogGroupName: "/aws/application-signals/log/group"}, }, } ua := newUserAgent() @@ -126,7 +135,7 @@ func TestEmf(t *testing.T) { assert.Equal(t, "inputs:(nop run_as_user)", ua.inputsStr.Load()) assert.Equal(t, "", ua.processorsStr.Load()) - assert.Equal(t, "outputs:(app_signals awsemf)", ua.outputsStr.Load()) + assert.Equal(t, "outputs:(application_signals awsemf)", ua.outputsStr.Load()) } func TestSingleton(t *testing.T) { diff --git a/go.mod b/go.mod index 51aaba43ee..1c6c1af36d 100644 --- a/go.mod +++ b/go.mod @@ -1,43 +1,44 @@ module github.com/aws/amazon-cloudwatch-agent -go 1.20 +go 1.22.0 -replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.20231103153700-d2a4b9e20b87 +toolchain go1.22.2 + +replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.20240423220441-63baeaedb379 // Replace with 
https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231221155153-92710a714293 - -// Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231208183748-c00ca1f62c3e - -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231221155153-92710a714293 - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231221155153-92710a714293 +replace ( + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20240517183704-e0e66ca9e79c + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20240517183704-e0e66ca9e79c +) -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231221155153-92710a714293 +replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy => github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20240503173519-cc2b921759f4 -replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => 
github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231221155153-92710a714293 +replace ( + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20240503173519-cc2b921759f4 +) -replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20231221155153-92710a714293 +replace ( + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20240503173519-cc2b921759f4 + // Replace 
with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20240503173519-cc2b921759f4 +) -replace go.opentelemetry.io/collector/config/confighttp => github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp v0.0.0-20231221155153-92710a714293 +replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20240503173519-cc2b921759f4 -replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231221155153-92710a714293 +replace ( + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20240503173519-cc2b921759f4 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20240503173519-cc2b921759f4 +) // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 replace github.com/shirou/gopsutil/v3 => github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20231109213610-a8c21c54a2be // indirect @@ -75,24 +76,20 @@ replace github.com/karrick/godirwalk v1.16.1 => github.com/karrick/godirwalk v1. 
replace github.com/docker/distribution => github.com/docker/distribution v2.8.2+incompatible -// Telegraf uses the older v1.8.2: https://github.com/influxdata/telegraf/blob/0e1b637414bdc7b438a8e77d859f787525b3782d/go.mod#L146 -// But we want a later version, so do a replace -// v0.42.0 looks lower, but Prometheus messed up their library naming convention, it actually matches 2.42.0 prometheus version -replace github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf23 => github.com/prometheus/prometheus v0.42.0 - // go-kit has the fix for nats-io/jwt/v2 merged but not released yet. Replacing this version for now until next release. replace github.com/go-kit/kit => github.com/go-kit/kit v0.12.1-0.20220808180842-62c81a0f3047 // openshift removed all tags from their repo, use the pseudoversion from the release-3.9 branch HEAD replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 +// forces version bump to support log group classes replace github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.48.6 require ( github.com/BurntSushi/toml v1.3.2 github.com/Jeffail/gabs v1.4.0 - github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231208183748-c00ca1f62c3e - github.com/aws/aws-sdk-go v1.48.6 + github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20240503173519-cc2b921759f4 + github.com/aws/aws-sdk-go v1.51.17 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 github.com/deckarep/golang-set/v2 v2.3.1 @@ -100,96 +97,108 @@ require ( github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/gobwas/glob v0.2.3 github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/golang-lru v1.0.2 
github.com/influxdata/telegraf v0.0.0-00010101000000-000000000000 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 - github.com/jellydator/ttlcache/v3 v3.1.0 + github.com/jellydator/ttlcache/v3 v3.2.0 github.com/kardianos/service v1.2.1 // Keep this pinned to v1.2.1. v1.2.2 causes the agent to not register as a service on Windows github.com/kr/pretty v0.3.1 + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c github.com/oklog/run v1.1.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.89.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.89.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter 
v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.98.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.17.0 - github.com/prometheus/common v0.45.0 - github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf23 + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/common v0.52.2 + github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e github.com/shirou/gopsutil v3.21.11+incompatible - github.com/shirou/gopsutil/v3 v3.23.10 - github.com/stretchr/testify v1.8.4 + github.com/shirou/gopsutil/v3 v3.24.3 + github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 - go.opentelemetry.io/collector/component v0.89.0 - go.opentelemetry.io/collector/config/configtelemetry v0.89.0 - go.opentelemetry.io/collector/confmap v0.89.0 - go.opentelemetry.io/collector/consumer v0.89.0 - go.opentelemetry.io/collector/exporter v0.89.0 
- go.opentelemetry.io/collector/exporter/loggingexporter v0.89.0 - go.opentelemetry.io/collector/extension v0.89.0 - go.opentelemetry.io/collector/otelcol v0.89.0 - go.opentelemetry.io/collector/pdata v1.0.0 - go.opentelemetry.io/collector/processor v0.89.0 - go.opentelemetry.io/collector/processor/batchprocessor v0.89.0 - go.opentelemetry.io/collector/receiver v0.89.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.89.0 - go.opentelemetry.io/collector/semconv v0.89.0 - go.opentelemetry.io/collector/service v0.89.0 + go.opentelemetry.io/collector/component v0.98.0 + go.opentelemetry.io/collector/config/configopaque v1.5.0 + go.opentelemetry.io/collector/config/configtelemetry v0.98.0 + go.opentelemetry.io/collector/config/configtls v0.98.0 + go.opentelemetry.io/collector/confmap v0.98.0 + go.opentelemetry.io/collector/consumer v0.98.0 + go.opentelemetry.io/collector/exporter v0.98.0 + go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0 + go.opentelemetry.io/collector/extension v0.98.0 + go.opentelemetry.io/collector/otelcol v0.98.0 + go.opentelemetry.io/collector/pdata v1.5.0 + go.opentelemetry.io/collector/processor v0.98.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 + go.opentelemetry.io/collector/receiver v0.98.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 + go.opentelemetry.io/collector/semconv v0.98.0 + go.opentelemetry.io/collector/service v0.98.0 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 - golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/net v0.19.0 - golang.org/x/sync v0.5.0 - golang.org/x/sys v0.15.0 + go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a + golang.org/x/net v0.24.0 + golang.org/x/sync v0.6.0 + golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 
v3.2.0 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 - k8s.io/klog/v2 v2.100.1 + k8s.io/api v0.30.0 + k8s.io/apimachinery v0.30.0 + k8s.io/client-go v0.30.0 + k8s.io/klog/v2 v2.120.1 ) require ( - cloud.google.com/go/compute v1.23.2 // indirect + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0 + go.opentelemetry.io/collector/featuregate v1.5.0 +) + +require ( + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect collectd.org v0.4.0 // indirect - contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect github.com/Azure/azure-sdk-for-go v67.1.0+incompatible // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/Code-Hex/go-generics-cache v1.3.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp 
v1.22.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/alecthomas/participle v0.4.1 // indirect - github.com/alecthomas/participle/v2 v2.1.0 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231208183748-c00ca1f62c3e // indirect + github.com/alecthomas/participle/v2 v2.1.1 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20240503173519-cc2b921759f4 // indirect github.com/antchfx/jsonquery v1.1.5 // indirect github.com/antchfx/xmlquery v1.3.9 // indirect github.com/antchfx/xpath v1.2.0 // indirect - github.com/antonmedv/expr v1.15.3 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect + github.com/apache/arrow/go/v14 v14.0.2 // indirect + github.com/apache/thrift v0.17.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2 v1.23.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.25.1 // indirect @@ -202,60 +211,64 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect - github.com/cilium/ebpf v0.7.0 // indirect - github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/cilium/ebpf v0.11.0 // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/console v1.0.3 
// indirect + github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/ttrpc v1.2.2 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/digitalocean/godo v1.104.1 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/digitalocean/godo v1.109.0 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect - github.com/envoyproxy/go-control-plane v0.11.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/expr-lang/expr v1.16.3 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - 
github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-resty/resty/v2 v2.7.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-resty/resty/v2 v2.11.0 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect - github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/cadvisor v0.48.1 // indirect + github.com/google/cadvisor v0.49.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gophercloud/gophercloud v1.7.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/gophercloud/gophercloud v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosnmp/gosnmp v1.34.0 // indirect - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect - github.com/hashicorp/consul/api v1.25.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect + github.com/hashicorp/consul/api v1.28.2 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // 
indirect @@ -265,94 +278,93 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hetznercloud/hcloud-go v1.41.0 // indirect + github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6 // indirect + github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect github.com/influxdata/line-protocol/v2 v2.2.1 // indirect github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect - github.com/ionos-cloud/sdk-go/v6 v6.1.9 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect - github.com/linode/linodego v1.23.0 // indirect + 
github.com/linode/linodego v1.30.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/miekg/dns v1.1.56 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/montanaflynn/stats v0.7.0 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect - github.com/mrunalp/fileutils v0.5.0 // indirect + github.com/mrunalp/fileutils v0.5.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.89.0 // indirect - 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.89.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.89.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.98.0 // indirect 
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.98.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/opencontainers/runc v1.1.9 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect - github.com/opencontainers/selinux v1.10.1 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runc v1.1.12 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/ovh/go-ovh v1.4.3 // indirect github.com/philhofer/fwd v1.1.1 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // 
indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rs/cors v1.10.1 // indirect github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sleepinggenius2/gosmi v0.4.4 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tidwall/gjson v1.10.2 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -360,69 +372,76 @@ require ( github.com/tinylib/msgp v1.1.6 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect - github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect + github.com/valyala/fastjson v1.6.4 // indirect + github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vishvananda/netns v0.0.4 // indirect github.com/vjeantet/grok v1.0.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/wavefronthq/wavefront-sdk-go v0.9.10 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.89.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.89.0 // indirect - go.opentelemetry.io/collector/config/configcompression v0.89.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.89.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.89.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.89.0 // indirect - go.opentelemetry.io/collector/config/configopaque v0.89.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.89.0 // indirect - go.opentelemetry.io/collector/config/internal v0.89.0 // indirect - go.opentelemetry.io/collector/connector v0.89.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.89.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect - 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/goleak v1.3.0 // indirect - golang.org/x/crypto v0.16.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.14.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/time v0.4.0 // indirect - golang.org/x/tools v0.16.0 // indirect - gonum.org/v1/gonum v0.14.0 // indirect - google.golang.org/api v0.150.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.opentelemetry.io/collector v0.98.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.98.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.5.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.98.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.98.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.98.0 // indirect + go.opentelemetry.io/collector/config/configretry v0.98.0 // indirect + go.opentelemetry.io/collector/config/internal v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 // indirect + 
go.opentelemetry.io/collector/connector v0.98.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.98.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.98.0 // indirect + go.opentelemetry.io/contrib/config v0.4.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.25.0 // indirect + go.opentelemetry.io/otel v1.25.0 // indirect + go.opentelemetry.io/otel/bridge/opencensus v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.47.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0 // indirect + go.opentelemetry.io/otel/metric v1.25.0 // indirect + go.opentelemetry.io/otel/sdk v1.25.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.25.0 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect + go.opentelemetry.io/proto/otlp v1.1.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.19.0 // indirect + gonum.org/v1/gonum v0.15.0 // indirect + google.golang.org/api v0.168.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + 
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog v1.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubelet v0.30.0 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + modernc.org/sqlite v1.21.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) -replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231221155153-92710a714293 +replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20240405185623-56e778998456 + +replace github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20240405185623-56e778998456 diff --git a/go.sum b/go.sum index 0618c54d5f..0e5f34e122 100644 --- a/go.sum +++ b/go.sum @@ -13,53 +13,80 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.56.0 h1:LHIc9E7Kw+ftFpQFKzZYBB88IAFz7qONawXXx0F3QBo= -cloud.google.com/go/compute v1.23.2 h1:nWEMDhgbBkBJjfpVySqU4jgWdc22PLR0o4vEexZHers= -cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= +cloud.google.com/go/bigquery v1.59.1 h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4= +cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4= cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.4 h1:K6n/GZHFTtEoKT5aUG3l9diPi0VduZNQ1PfdnpkkIFk= 
-cloud.google.com/go/monitoring v1.16.2 h1:gx7BDZcoRqX5DfuJzw9LdhVjEkqCLmDXScdnrmIy9ik= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4= +cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.36.1 h1:dfEPuGCHGbWUhaMCTHUFjfroILEkx55iUmKBZTP5f+Y= +cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= +code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.4.0 h1:nWNldfMqg7EVWAevG8oyOVsS9r/UHRG3LZRf6MdQho0= collectd.org v0.4.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= -contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod 
h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/Azure/azure-amqp-common-go/v3 v3.2.3 h1:uDF62mbd9bypXWi19V1bN5NZEO84JqgmI5G73ibAmrk= +github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas= github.com/Azure/azure-event-hubs-go/v3 v3.3.17 h1:9k2yRMBJWgcIlSNBuKVja2af/oR3oMowqFPpHDV5Kl4= +github.com/Azure/azure-event-hubs-go/v3 v3.3.17/go.mod h1:R5H325+EzgxcBDkUerEwtor7ZQg77G7HiOTwpcuIVXY= github.com/Azure/azure-kusto-go v0.5.2 h1:6kFVZp4iyz8YFTuxrIdivAXVcEs5wNKTVK5gai+E8pk= +github.com/Azure/azure-kusto-go v0.5.2/go.mod h1:2xOhBxRcHyyNifFHmNMcqYL6AMdhyrUHCkEJkrZ+EI4= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v67.1.0+incompatible h1:oziYcaopbnIKfM69DL05wXdypiqfrUKdxUKrKpynJTw= github.com/Azure/azure-sdk-for-go v67.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd 
h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.17.0 h1:HHXa3149nKrI0IZwyM7DRcRy5810t9ZICDutn4BYzj4= +github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= @@ -70,11 +97,12 @@ github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 
h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= @@ -86,90 +114,111 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 h1:tk85AYGwOf6VNtoOQi8w/kVDi2vmPxp3/OU2FsUpdcA= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.22.0 h1:PWcDbDjrcT/ZHLn4Bc/FuglaZZVPP8bWO/YRmJBbe38= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.22.0/go.mod h1:XEK/YHYsi+Wk2Bk1+zi/he+gjRfDWtoIZEZwuwcYjhk= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= +github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak= +github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.32.0 h1:P+RUjEaRU0GMMbYexGMDyrMkLhbbBVUVISDywi+IlFU= +github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aerospike/aerospike-client-go/v5 v5.7.0 h1:Olgq011scnhKlGxo4AcGSXI8JRLF0aSEdl1PhjmKTUo= +github.com/aerospike/aerospike-client-go/v5 v5.7.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= +github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/go-thrift v0.0.0-20170109061633-7914173639b2/go.mod h1:CxCgO+NdpMdi9SsTlGbc0W+/UNxO3I0AabOEJZ3w61w= github.com/alecthomas/kong v0.2.1/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI= github.com/alecthomas/participle v0.4.1 h1:P2PJWzwrSpuCWXKnzqvw0b0phSfH1kJo4p2HvLynVsI= github.com/alecthomas/participle v0.4.1/go.mod h1:T8u4bQOSMwrkTWOSyt8/jSFPEnRtd0FKFMjVfYBlqPs= -github.com/alecthomas/participle/v2 v2.1.0 
h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4= -github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= +github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= +github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= github.com/alecthomas/repr v0.0.0-20210301060118-828286944d6a/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= -github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp 
v0.0.0-20231221155153-92710a714293 h1:obkaR5eEXrXkkyGUhrKsT8Tv71d4kv7Lya8osYJAQQA= -github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp v0.0.0-20231221155153-92710a714293/go.mod h1:3sU3HgF5wc32CVljnzGo4Fn/9+T0N1Z6tCJyKdW2MvM= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231221155153-92710a714293 h1:XOxFyC1WpWbhGlz55m+LMlLeAGGKLeKkFA2Dya0I7kk= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231221155153-92710a714293/go.mod h1:9L23Mib5WjvuWRMmLyZrH+OJyeDz0fEZ9e2ummzZlgU= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231221155153-92710a714293 h1:dCgA/y6U/0vgMV7sMbspVXg9Jfm0ahIduBEA6X2hXjU= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231221155153-92710a714293/go.mod h1:2+SR5BBlbAx503ixT3O2voJcNCxOgc+3vqRiGUFQU5k= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231221155153-92710a714293 h1:VlkfSM1CfSBF60bxTfVV2bK8sNVJlPp2+CILLKfcu5M= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20231221155153-92710a714293/go.mod h1:n5I9WMdSLqf1nngS88/Os8Ts2xF+jd0RxGVfEP46Xsk= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231208183748-c00ca1f62c3e h1:qIfARzccXIc2Q8q160L6AV3aqIwKsijkucWBKi6a/pM= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20231208183748-c00ca1f62c3e/go.mod h1:5JOe6ISApVHBIsZuLb8ppaY06ujDcHCxYJE5wCymNoI= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231221155153-92710a714293 h1:S+SaW2waNdJDqlM6pQZqK4mp2joS987P1FE5fiIJF7g= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20231221155153-92710a714293/go.mod 
h1:Mpe0DhouTXYDk/DyFDyQbjKpVxRTqahohQT5lidS2jY= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231221155153-92710a714293 h1:XGLk7XBPb3YPeQBtkzqc0VwJsbwIVrRCfBjG8jKJ8Zg= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20231221155153-92710a714293/go.mod h1:Xg5sUWQEuVshBnsZB7wxGDLf5DfzAqnFZGVbMHvoaj8= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231221155153-92710a714293 h1:87fR7qoBch57mQhqvkDc0vEslLQdfHPCum53HpDo6S8= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20231221155153-92710a714293/go.mod h1:YNWxiYSLiIxOmw5hmA6jSnK8KVyru1JLX1OQeUCtCf8= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231221155153-92710a714293 h1:6KZWBakuBiO7yCkCYUZbtYYhs6zbTZkdxCbJd1xxMZ8= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20231221155153-92710a714293/go.mod h1:RNHBmikDFzPQ8GbL/UtLxiJ7xqbK5RrFsfUSnIjJJlE= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231221155153-92710a714293 h1:QREKHVRnmHNXK1WXZ2JJy/D+9ifRImX8Yfr1ylPAtvI= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20231221155153-92710a714293/go.mod h1:D6lmFdWbpYRttNnfkZJSs2ZZcAUGfe96/Vrm8tiK3Tw= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231221155153-92710a714293 h1:tdKrquL4SXnudMwMcgUJWxTG/ZlxcuFIpGLzAdY2PTM= -github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231221155153-92710a714293/go.mod h1:t/hYoRTnlPuRjh8y0BwVGgNvNIXpU2QJME5YVppUUHQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20231208183748-c00ca1f62c3e h1:T9NVthu16Nd8FSuqPw2718jijASLLjo+m09Q2qeamoM= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza 
v0.0.0-20231208183748-c00ca1f62c3e/go.mod h1:7uCHpcHoawJsqoyPLxaFROWsZXPSF6/op3Hmw4pV4WE= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231221155153-92710a714293 h1:P6oZ2M+MiL0ti5ORCaRxELtK2UxrVyO+PjdqbkqPq3M= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231221155153-92710a714293/go.mod h1:HXv8nyJ+RUHGLZMPbaPFnWKonYWNTJfZ9ZUudqtwudw= -github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20231221155153-92710a714293 h1:x44yeVvtVWIjl7aAwnFEpGqF/vnToGo4XsU+At/+p9k= -github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20231221155153-92710a714293/go.mod h1:L4/sIFbml9J28qd8i2aJGztmCg2FCCtfTdwikpu4JEs= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231221155153-92710a714293 h1:/zN4UcdF82q1tPSQOx1kcniGO/9JCmbm4a8HjKv8/AE= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231221155153-92710a714293/go.mod h1:2/pkztaYtQZkP5tMJ2lecQU0qHu3Ih2SyBIy7TGklAE= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231221155153-92710a714293 h1:Cw76iRhM0FcNr5jvUe1wZnXDSHsWaLGfaOVrrFVwilQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231221155153-92710a714293/go.mod h1:4IMYeZjU+IgZdXHuiLOIVtdp42lrMjk+rtlQpENeGSM= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231221155153-92710a714293 h1:rVEEJlo6E/7dPvVsOOWY49ojRzZswxRV/9ZoERBfSag= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20231221155153-92710a714293/go.mod h1:fnNxw30DVmpiS3tt1nUETZH3g/boGnBLx7+hYwYd9EU= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483/go.mod 
h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20240503173519-cc2b921759f4 h1:AM/j39i5nQ1z09KC7GynVvTtC31y/Qi5N6VCOEhN0v8= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20240503173519-cc2b921759f4/go.mod h1:P9k2FWA6wiRTWRnUfknsMfHkN8XuQQ1lBYA3txjPf98= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20240517183704-e0e66ca9e79c h1:LiwSPScsEdXIZunXyBoMzB+URrPW4PacLVvnT8kqulE= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20240517183704-e0e66ca9e79c/go.mod h1:shTWN1DIuzxMlxFAePpC5ssPno7InKYz67kS7p+zTTM= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20240517183704-e0e66ca9e79c h1:mhjSDvLcsHrhT2YdZ73ZHhmYAsFpYa4U8ihJ4zoMzYk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20240517183704-e0e66ca9e79c/go.mod h1:o9NV6CjnL89FLwpxhDYD+/p+6hCh5vlxE6fZPFIuV8g= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20240503173519-cc2b921759f4 h1:HyPipXg155rx1KSuYmjc/oDGEr2SR6d6Q34xKUc9PVk= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20240503173519-cc2b921759f4/go.mod h1:cDpFSR8i0bjVLkxLxOFABagktk5wd7LdMzU4xX7unPo= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20240503173519-cc2b921759f4 h1:Jror5XL3GsJlDF4eoLblAq/YDVPFz3qePtUqWN2wGFQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20240503173519-cc2b921759f4/go.mod h1:CMxOkDu9MF1JJ7X/jYYvjFTLxZ90n+mkFt88aMdBINI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20240503173519-cc2b921759f4 h1:bCDehcKUhWMkLJiIxjpey1PqK00iMX1fVIJimm+SMoI= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20240503173519-cc2b921759f4/go.mod h1:V8pzzNan0O1W9o3rBWpSwn8YIUN9ZVCEctuzn55EWtw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20240503173519-cc2b921759f4 h1:n7rOi3pCOJF/QnxCVkXtL7zxLDF4TsnbWsxaw8MTfPw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20240503173519-cc2b921759f4/go.mod h1:EW3A0XWGehjQfN6cw17uoDtYqd5SqsXDNfac9WAj8oU= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20240503173519-cc2b921759f4 h1:c6zxIUWLUMtRY8F37CQCl5/cyb9y2odfjEPIh0HsC/U= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20240503173519-cc2b921759f4/go.mod h1:dkG+z2jMW6p8HgiZauQwJLgeTxQzwa/I1qoeSx0U3wI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20240503173519-cc2b921759f4 h1:jSqA+xluL3ks/eJ7/GVm3JdGY99xIo9xFhvjy3bXvq0= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20240503173519-cc2b921759f4/go.mod h1:vwINNzS7Mu37VQRRa5Kq/iO88u30K3fL0MdssCP5Qnw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20240503173519-cc2b921759f4 h1:5fh25Ckk+u4hWu+3UffcAdnyvgbkH3QEKCQIqDOyABo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20240503173519-cc2b921759f4/go.mod h1:bwdfq5JKghWQG9QrzVW8g9Kd2D7b9brOaHiYWPO8V4A= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20240503173519-cc2b921759f4 h1:uoonCx4ubTqe0Y+r5VVjuOlQIZLr83dZav4LmAO4CpA= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20240503173519-cc2b921759f4/go.mod h1:Y+QZTJhbAmXu7FcAlFjQ78hxoGMXP7apd9X5ux6ENQo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet 
v0.0.0-20240503173519-cc2b921759f4 h1:zWd6/Mwwv1DibHs6EpYE218PcLfABhZpNF+aKrrBiuw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20240503173519-cc2b921759f4/go.mod h1:JZ3OGy7w5tcCOXXSZ/KPIleJ4pLH1URvxc0gS/W19p8= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20240405185623-56e778998456 h1:0X4NZmgT+m86lxiWqb2LTyBaL9Yog/T5kky8tcTq900= +github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20240405185623-56e778998456/go.mod h1:t/hYoRTnlPuRjh8y0BwVGgNvNIXpU2QJME5YVppUUHQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20240503173519-cc2b921759f4 h1:slmCa9AVYQ0qPZ+i49CVYKcTG+TEPDInkSwXzGA5Cgs= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20240503173519-cc2b921759f4/go.mod h1:zhLhabEO8mr1PY65YZuJuFJAbN0938xhi3bVKD8fbnQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20240503173519-cc2b921759f4 h1:qxvOC9JmfGnPdhLqH4WdYhhZ4Wz0szlRHsGcDnoXLls= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20240503173519-cc2b921759f4/go.mod h1:FjFQFT6tfC7Gac53GC/vorbTwvR/UzRrGh/rgSQsCB4= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20240503173519-cc2b921759f4 h1:a7Fz5SO0jt93JZuxQN/Au3jp57YJuERIxqsWW+1o/gc= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20240503173519-cc2b921759f4/go.mod h1:a6HP//RgakNXjSCDK5NB1Fxb7uNGWD23JTWqSyp1DxU= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20240503173519-cc2b921759f4 h1:tSzc0lGuTAq5LuckCqIXFj6jUJukvtyrG/2IsHZ131s= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20240503173519-cc2b921759f4/go.mod 
h1:qUFt/MaXuMA2uRd/hssUWAyIw5IsriPv8KV4VjPjWZA= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20240503173519-cc2b921759f4 h1:7vPfgOspEhKENfJQ/DL3yJ7vksnAooenlHoMwgQn3GQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20240503173519-cc2b921759f4/go.mod h1:gw1UQJfG+9fp8X3JkKp1q66J86w1nnFFUDQslUCZStI= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20240503173519-cc2b921759f4 h1:rJv1IdDTP9PwFc4pL7hr/8FIJ/tehOBsfqxnyazWDTc= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20240503173519-cc2b921759f4/go.mod h1:JLA1nMGAcSlqAdtK9KuVg092WnrXE2ULkCr1j/1po6g= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= +github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= +github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= github.com/antchfx/jsonquery v1.1.5/go.mod h1:RtMzTHohKaAerkfslTNjr3Y9MdxjKlSgIgaVjVKNiug= github.com/antchfx/xmlquery v1.3.9 h1:Y+zyMdiUZ4fasTQTkDb3DflOXP7+obcYEh80SISBmnQ= @@ -178,13 +227,16 @@ github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNY github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod 
h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= -github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= +github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= +github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= +github.com/apache/thrift v0.17.0 h1:cMd2aj52n+8VoAtvSvLn4kDC3aZ6IAkBuqWQ2IDu7wo= +github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= +github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= +github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= @@ -200,51 +252,75 @@ github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2 v1.23.0 h1:PiHAzmiQQr6JULBUdvR8fKlA+UPKLT/8KbiqpFBWiAo= github.com/aws/aws-sdk-go-v2 v1.23.0/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= 
github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/config v1.25.1 h1:YsjngBOl2mx4l3egkVWndr6/6TqtkdsWJFZIsQ924Ek= github.com/aws/aws-sdk-go-v2/config v1.25.1/go.mod h1:yV6h7TRVzhdIFmUk9WWDRpWwYGg1woEzKr0k1IYz2Tk= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/credentials v1.16.1 h1:WessyrdgyFN5TB+eLQdrFSlN/3oMnqukIFhDxK6z8h0= github.com/aws/aws-sdk-go-v2/credentials v1.16.1/go.mod h1:RQJyPxKcr+m4ArlIG1LUhMOrjposVfzbX6H8oR6oCgE= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 h1:9wKDWEjwSnXZre0/O3+ZwbBl1SmlgWYBbrTV10X/H1s= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4/go.mod h1:t4i+yGHMCcUNIX1x7YVYa6bH/Do7civ5I6cG/6PMfyA= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 h1:DUwbD79T8gyQ23qVXFUthjzVMTviSHi3y4z58KvghhM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3/go.mod h1:7sGSz1JCKHWWBHq98m6sMtWQikmYPpxjqOydDemiVoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 h1:AplLJCtIaUZDCbr6+gLYdsYNxne4iuaboJhVt9d+WXI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3/go.mod h1:ify42Rb7nKeDDPkFjKn7q1bPscVPu/+gmHH8d2c+anU= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 
h1:usgqiJtamuGIBj+OvYmMq89+Z1hIKkMJToz1WpoeNUY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2 h1:T2YjSwrDkLg2laNjhIunyTbjy9Qzd/oZ+yQjrAhdIEA= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2/go.mod h1:GuVYdn7tWjbyp/YtZSM6VczmceUUQW6v8Yq98wJ9dWY= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.13.0 h1:NfqONXoDwWtBCnkPVz7GL/FKMo/s//TnHSwF+PjzG5c= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.13.0/go.mod h1:OFC7Rn7jyPoKtczT+TARRbxKHRmN9nyrJ2rmCc3ewuQ= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.14.0 h1:P+eF8PKkeaiTfN/VBe5GI3uNdhwCPVYCQxchRewJcWk= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.14.0/go.mod h1:15NiwrGGBpsC7C3zScmoaqNo1QJ9SRjdM5jxEPnCUR8= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6 
h1:JGrc3+kkyr848/wpG2+kWuzHK3H4Fyxj2jnXj8ijQ/Y= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6/go.mod h1:zwvTysbXES8GDwFcwCPB8NkC+bCdio1abH+E+BRe/xg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 h1:kJOolE8xBAD13xTCgOakByZkyP4D/owNmvEiioeUNAg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3/go.mod h1:Owv1I59vaghv1Ax8zz8ELY8DN7/Y0rGS+WWAmjgi950= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0 h1:wqLvwC4qdrrGikudu8Z9X2sb79BYUYWAgMF5BGFQJY8= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0/go.mod h1:RCOtKdXlUfirtaxlHIcFs586lpZU2HD8AzmfXzapOdg= github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 h1:V47N5eKgVZoRSvx2+RQ0EpAEit/pqOhqeSQFiS4OFEQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.2/go.mod h1:/pE21vno3q1h4bbhUOEi+6Zu/aT26UK2WKkDXd+TssQ= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.2 h1:sMAcO7VHVw28HTAdZpTULDzFirHOsVm/x25CxhUH0jA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.2/go.mod h1:dWqm5G767qwKPuayKfzm4rjzFmVjiBFbOJrpSPnAMDs= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/sts v1.25.2 h1:vwyiRTnXLqsak/6WAQ+uTRhVqKI6vxUQ0HJXjKij0zM= github.com/aws/aws-sdk-go-v2/service/sts v1.25.2/go.mod h1:4EqRHDCKP78hq3zOnmFXu5k0j4bXbRFfCh/zQ6KnEfQ= 
github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6 h1:HwNzaXr3lHe3YPEyyx7Fh41CZplz6G1YqB3OR0FJ2iw= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6/go.mod h1:akrYtxss20JAwAF/VzsUJRHf210HwuLZpUy1Njrgpe0= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/telegraf v0.10.2-0.20231103153700-d2a4b9e20b87 h1:/zmphBRn0vrwBzVOUmeXJQRuZxlvf1C0m+sHe81zwxg= -github.com/aws/telegraf v0.10.2-0.20231103153700-d2a4b9e20b87/go.mod h1:SMbtNz1+X7rQbr19B2Engg5+WWq+oI9rvRrEgy3qPho= +github.com/aws/telegraf v0.10.2-0.20240423220441-63baeaedb379 h1:EaMA5kc5yQzobctnBE8MYD9h4HPQ/YtCg4u0mFKXAj8= +github.com/aws/telegraf v0.10.2-0.20240423220441-63baeaedb379/go.mod h1:tSaq8qDvwntXHIWy6YTHPoWttYsOnF7Hm3mpZfHkIrA= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20231109213610-a8c21c54a2be h1:sF6OUdk1hpuX7lf74vn+zBUFtQRe+hky0jmMYyFp5Kk= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20231109213610-a8c21c54a2be/go.mod h1:1W1wnODUDv+FBSAtAa878Kxto5kj8eV+kI0AF4LIjq4= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -258,55 +334,70 @@ 
github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579/go.mod h github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= +github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= +github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/couchbase/go-couchbase v0.1.1 h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk= +github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q7JzdEY= +github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= +github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -314,20 +405,25 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= 
+github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= -github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= -github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= +github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= +github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 h1:27379cxrsKlr7hAnW/xrusefspUPjqHVRW1K/bZgfGw= @@ -335,37 +431,44 @@ github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1: github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 h1:q2Ayh9s6Cr75bS5URiOUAoyFXemgKQaBJphbhAaJHCY= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/expr-lang/expr v1.16.3 h1:NLldf786GffptcXNxxJx5dQ+FzeWDKChBDqOOwyK8to= +github.com/expr-lang/expr v1.16.3/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -378,21 
+481,25 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= +github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -404,6 +511,7 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -413,60 +521,71 @@ github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod 
h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= +github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= +github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= 
+github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= -github.com/godbus/dbus/v5 v5.0.6/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -498,8 +617,8 @@ github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -507,9 +626,11 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/cadvisor v0.48.1 h1:eyYTxKBd+KxI1kh6rst4JSTLUhfHQM34qGpp+0AMlSg= -github.com/google/cadvisor v0.48.1/go.mod h1:ZkYbiiVdyoqBmI2ahZI8GlmirT78OAOER0z4EQugkxQ= -github.com/google/flatbuffers v23.1.21+incompatible h1:bUqzx/MXCDxuS0hRJL2EfjyZL3uQrPbMocUa8zGqsTA= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cadvisor v0.49.0 h1:1PYeiORXmcFYi609M4Qvq5IzcvcVaWgYxDt78uH8jYA= +github.com/google/cadvisor v0.49.0/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers 
v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -525,11 +646,11 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= +github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -545,29 +666,32 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopcua/opcua v0.3.1 
h1:BS1TRJUdsPSwU0mlfc8Dffchh0jTw9lWchmF4HFRo2w= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gopcua/opcua v0.3.1/go.mod h1:rdqS1oF5s/+Ko4SnhZA+3tgK4MQuXDzH3KgnnLDaCCQ= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -577,21 +701,28 @@ github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 h1:Q7e9kXS3sRbTjsNDKazbcbDSGAKjFdk096M3qYbwNpE= +github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY= github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa 
h1:Rsn6ARgNkXrsXJIzhkE4vQr5Gbx2LvtEMv4BJOK4LyU= +github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c h1:befb5xGUwNCoBuN/akLFCKekUzr0ixyws3aAX/7TaOk= +github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= -github.com/harlow/kinesis-consumer v0.3.5 h1:xeiDp2frP8DdKDeOzVuS+vaBX03JjifQO/Apzu4IOMA= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6 h1:38nI+nE+oUmLmlNjuByhvnmuBrcQVLNkOJhSSM4eJv0= +github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6/go.mod h1:hNEr2hL0WPpm4BSILcClbOE/+Tew0JJnqCbTlc6jCUc= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.13.0/go.mod 
h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -613,6 +744,7 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -632,6 +764,7 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -640,6 +773,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= @@ -648,8 +783,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0= -github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod 
h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -658,9 +793,10 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= -github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= +github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= +github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= @@ -672,11 +808,14 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= 
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6 h1:s9ZL6ZhFF8y6ebnm1FLvobkzoIu5xwDQUcRPk/IEhpM= -github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6/go.mod h1:aXdIdfn2OcGnMhOTojXmwZqXKgC3MU5riiNvzwwG9OY= +github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2WiFeAwiJ/uyx1qIKnV1L4C9w/2V8ehlbJY4gjFaM= +github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h1:1yEQhaLb/cETXCqQmdh7lDjupNAReO7c83AHyK2dJ48= github.com/influxdata/influxdb-observability/common v0.2.10 h1:5sQwU7KQYWSB7ZuPZXO39yQJ2nw2FPoxWeLoNK2jKRE= +github.com/influxdata/influxdb-observability/common v0.2.10/go.mod h1:bl0YEzAg4yAoH8a2C4hz6CS/+OpNM9YyVjd5pkaAbZs= github.com/influxdata/influxdb-observability/influx2otel v0.2.10 h1:YZbHxhGBfOmfXFe6Odovq7eALriDnfQwtRFoT2JypHk= +github.com/influxdata/influxdb-observability/influx2otel v0.2.10/go.mod h1:y/9uuUKTjqXcHE4XtJYxWxxl5DSw9RIvjl049m6C6co= github.com/influxdata/influxdb-observability/otel2influx v0.2.10 h1:sNZCYUExwCWsNHWpNlu2gZZZav6H0rjK/DcaXEVN29E= +github.com/influxdata/influxdb-observability/otel2influx v0.2.10/go.mod h1:UOa19v6sU7EpL1dPK79Yt+mZ+1/iOwvMqcFu9yVXenw= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98= github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig= @@ -686,31 +825,49 @@ github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxks github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE= 
github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM= github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 h1:0rQOs1VHLVFpAAOIR0mJEvVOIaMYFgYdreeVbgI9sII= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E= -github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= -github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ= +github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38= +github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= -github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA= +github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jaegertracing/jaeger v1.38.0 h1:rDQ36TnSxUX4gTskMQzEdpieS0BGYdfXXnUJmGnNMGw= +github.com/jaegertracing/jaeger v1.38.0/go.mod h1:4MBTMxfCp3d4buDLxRlHnESQvTFCkN16OUIeE9BEdl4= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= -github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= 
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= +github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -739,17 +896,20 @@ github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7 github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= +github.com/klauspost/asmfmt v1.3.2/go.mod 
h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= -github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -766,11 +926,13 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= +github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= -github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= -github.com/linode/linodego v1.23.0/go.mod h1:0U7wj/UQOqBNbKv1FYTXiBUXueR8DY4HvIotwE0ENgg= +github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k= +github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -787,6 +949,7 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= @@ -799,19 +962,22 @@ github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= +github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b h1:Kcr+kPbkWZHFHXwl87quXUAmavS4/IMgu2zck3aiE7k= +github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= 
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= @@ -826,22 +992,26 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -852,12 +1022,15 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mostynb/go-grpc-compression v1.2.2 
h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= -github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q= +github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/multiplay/go-ts3 v1.0.1 h1:Ja8ho7UzUDNvNCwcDzPEPimLRub7MUqbD+sgMWkcR0A= +github.com/multiplay/go-ts3 v1.0.1/go.mod h1:WIP3X0efye5ENZdXLu8LV4woCbPoc41wuMHx3EcU5CI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -867,28 +1040,36 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= +github.com/nats-io/jwt/v2 v2.5.0 
h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= +github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= -github.com/nats-io/nats.go v1.15.0 h1:3IXNBolWrwIUf2soxh6Rla8gPzYWEZQBUBK6RV21s+o= +github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU= +github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0= github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= -github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c= +github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA= +github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1 h1:6OX5VXMuj2salqNBc41eXKz6K+nV6OB/hhlGnAKCbwU= +github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= +github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= 
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olivere/elastic v6.2.37+incompatible h1:UfSGJem5czY+x/LqxgeCBgjDn6St+z8OnsCuxwD3L0U= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -896,67 +1077,69 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.89.0 h1:31Nbeod7i+wYzDb6n16tVqkeA37MoQUN9Wgh4MxAaX8= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.89.0 h1:d2nkq8HvFyCrbAB3w2WSeJ5VIljn1xUkxyab5jmDjwg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.89.0/go.mod h1:vsRWa6BigsVJpocMOsIhzzIpviLE1ccQ98YDgTAzn5E= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.89.0 h1:JLVBDcRuwvlD469yaGDDLzbw3j+L7BuC1RmI0xhtMTI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.89.0 h1:RHln8CleTEtf8fA23mI0lDAGbsxJsXlWDVXJsoUDWmQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.89.0/go.mod h1:f14BGLPG7oos1rdywnYtIaKnG8qgdBLJAKxTM9otCZ8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.89.0 h1:mgxQeGPYlqz/xa7MC+8euupnz5iPo+pj+4ucu7iskWs= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.89.0/go.mod h1:1yBu6NDWkSergVx105J1+VoemO29zNV96k1TKZw9D38= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.89.0 h1:n5/PfauD3QYvFSRCbo498PObHrLm0qkOpV1P6ANQJh4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.89.0/go.mod h1:AnTZIdBSzgBQ6mMoZq/nslvo8WOT/+jUZoCSkaL2kcY= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 h1:stB4V0yU6htEVWxoNOVuiIPDUetbRLlpP4m1Rcn03G8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0/go.mod h1:mrkZwYA2MKZaidETgwMffAyPzsLjOq5fEJB58TIXa0I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0 h1:OzyR48z06nE00fPS2pdyoBdY7MsnHvitiDZ6krDPKkM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0/go.mod h1:D32znNtx75Y0lUECRJRdQTyHjaropN8AD+qHqXs4/6Q= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.89.0 h1:6UTjDWi0qReo5pf34E6SEKwT81KCb5kUxi3vVhcRG48= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.89.0/go.mod h1:/fuCFCuI8B9ViltKiw2xZEKdTEcEZMbPAajRfJqF9mg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.89.0 h1:LP0znKNQsJHqWVRKTzZcgnJEQv4730XBe1tzTFkVHQE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.89.0/go.mod h1:1mxlOOYVp1/QGKM13X1m1ZUJa0GJ2Lvz/URC9d8FC1s= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.89.0 h1:bWeWdtqT+GryeILIJ64eChhgkVoj4Ireczxr9C1hWKg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.89.0/go.mod h1:JszEiY5P2i7sL6LAsic+TM/C3FWHRL0wbazfVmIdPE0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.89.0 h1:NElphJJuMTt/LBFULiN4IuMWKwNWBxb+SgWBWE7V4BU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.89.0/go.mod h1:B13GrpB/gdZKCOk1e40uIDefGxZI7745nxhBZ28MwAw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.89.0 h1:GAI5zG4W6wo20qVLvZjamskPYh7JEPEPMboElWxWzQE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.89.0 h1:E3uMN6efnooLi5/jz0hYlsvczaYg/XT7lNrv2/QjUa0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.89.0/go.mod 
h1:TCT9B0MM0XsMdvJ9Hif1tZ/3rUkJH7nJ77mpvj6Wkh8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.89.0 h1:haMoD8AR60bbBBc/z7J1jUqv0xaZ/TjTI5Frz+Rm8oc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.89.0 h1:6gs+KOZ77UhSJnNjpacptiMuvVqfmgYw/f24uqLFxzM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.89.0/go.mod h1:oyLPe07YKGU38ANwNCisNyad2/xBUNslHuziYHE00v4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.89.0 h1:qPwyUva7Vjnp7WJAA8vOw7SEfGdKW2w5sKlCondKl68= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.89.0/go.mod h1:rHxhO+rTfEfAo9xtEZSJXdvbiw1Gcya1EIX2knIa2xo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.89.0 h1:Lu16kQylQWaI217RMA3E3mPos2RUEN7oybg+7YzCbzM= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.89.0 h1:VaxkGMEy9QqDmngEFU5zgy2lra216CEz05p1cP/g8FE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.89.0/go.mod h1:n7Ha6X8VPlgWoYby+V32W9jmMEu0u3SdnWSYWBuAano= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.89.0 h1:JimsaOmtYrYOAA35JzkqnkDFGZi9D50GfaHVG5W7iUQ= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.89.0/go.mod h1:dMuzeobFNwvW7TgxHVVF1UH2HMK4jcU1JxzGdv+WZVk= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.89.0 h1:aX/JOO9uRSuu1QXOw6pgKoofobxkFqz0nkevwctbkFU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.89.0/go.mod h1:AUxeyAuaT9fxbjMmCzZkbEJsxmdKo1Fe7BhWwfbzRq8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.89.0 h1:D6kSn1hw5DMfQVspWSYmofWks1RP0wci9VJbLTckwdQ= 
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.89.0/go.mod h1:vAA4Q3EW70/LRvMM3oXJznZ7Q/2bU9c9SdOW7TyLqR4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.89.0 h1:tjKz5xlnwYgR9xSb3eDSawcNyImUtl/MGnAYcizfUSw= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.89.0/go.mod h1:zQCzHJiER8Mc0kXt7LWShxD6Qx/fi2eQJWbC/6d5Smk= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.98.0 h1:PvTmyr1MOFwlKdEqHDKEwoOSLINTiEppcvzp6a2jsFQ= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.98.0/go.mod h1:fxMPjSrU2yhl0wcc+aBgv1F6brf6A4t2IM/IT1PwLZ0= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0 h1:yend0fdg/ejfVSFOCI8CLo5ikkNhSl41Zs6ma5jUZ4c= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0/go.mod h1:yGkFJl78686wAA5235HdLLQrWlOxuNqnZzQMUz2I7Ac= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0 h1:4RD0elfzuoOxrBpekmg94JmIJjL8MZIayIUBLva+P44= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0/go.mod h1:BnCYHF+EfdRTriFxwaaA9SuGV7Nri8WkZLXp+8keTg8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0 h1:gNh2x7mHzpRL1+tpj3n30L1UswcsVen4gyhBWHH3+so= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0/go.mod h1:dPJAgUxoGBqXFPsW0W6rxp43MQi+DZFnPBhYZpT2rIk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 h1:rbfZJ4YA82WSC9HW2jgmY1UZEsOYte8VTjrBFU6dv5Y= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0/go.mod h1:KOTp7iN8sWTmqk/SxbMqvM2KXUdgXRWb6hI84E5arLY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 h1:FaldDCQ6hpPAauYZ1kbNWkTFU2vRgL/nr5UY8d2jrT4= 
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0/go.mod h1:0arlQ0mj/VhcFFSKHDmIc+iieHweXKENSBcqNnAY8OA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0 h1:KHO7ptmWdDW1wi0oiDzLNEDyXDG9TFsK/N6LTgyL6JI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0/go.mod h1:EwaZ9Bj0+7l5roLUkdKIH1pHXdfHCGve/8mZTf5Hzxk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0 h1:i5+XkPXUFqbNnOYngPq1b7nZ1PyGdAtLwcsEGHJ79yw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0/go.mod h1:JRRvo6HJ8jtHzHA2/H4+bIZsO5M8gpSXDLZ06fHN7II= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0 h1:orhX9ZGjvIUlW1Pesk/q6KVsKZ1S3Ql6KOS+wCJwMDI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0/go.mod h1:jzd6HhS9Av55YlRctGWG/Jk/rATPYRdVCiOZLlQnDxM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.98.0 h1:Y4UbYtpt+AsCVj4W8HKxLSlKNrMLhxypmA34GkwjJGE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.98.0/go.mod h1:a1eqRzzePACd9YamtiY0+42P4dn1pB0Ozc0B3P6JGMs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0 h1:vgVVJgTRQzgkIf8iODqKiS5VoMUyUPHRQjAUMOeLJt8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0/go.mod h1:5RtSFx/r557j1/Sy8+MO+N0ulfEzDwSNwEKo7bdHvYQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0 h1:lZGvpoHCYoEFBDLhnKGGAgGJyX2l2jGZUgC1kTz/esI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0/go.mod h1:fOHCXaWnVcKJPY9Jxeoyl7tSuuhZ/PPN92ib8eyjSLQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 h1:F1Vnzkhg0Trncmvw65Mq1wBKPgX2wI3aGE90J7CXhuU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0/go.mod 
h1:yybbWtN2RnnIoqw3KzboFjk4iYpEBXPzHQvARs+eO9U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0 h1:Ml4/JEqJeJknFMiXW5AxtrejrbGXyocRq/BfCCLS5jA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0/go.mod h1:DjiZ//9SFD9if4d/Q7dFam/4etFiXFpkxZ3kGM7XKmE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.98.0 h1:pM4puW3v2E+kfvuxz9L3bqGXbg/l6skLYVyZE3ksI0Y= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.98.0/go.mod h1:tdgLPheVJOpy9Gic113d7F9+a4S/slFV5OxJAiIpGSk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0 h1:F93HvVWJSEUtiwtyqXicjBiq3PhXxVjvX59VJtRuZL0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0/go.mod h1:vUObVWshD4NKy4CzZpcC8V/avUyFD6WmIwOIaux7oDA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0 h1:Q+NzXfH9LTNupFpUdXFG/q3rka/Hv7lutBX5cjiPYmg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0/go.mod h1:9G5dg3SYuipPocclXv4U87hQ1/B2T4ca7lzUOLSReko= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.98.0 h1:ljdy8h+V69mjx4X0Jbu4nt0FbeXa8h53ogie6OIK2zg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.98.0/go.mod h1:iz/isMSPjHCFKiS9twzsfBMwy1j7p4fAxLSL47mf7zI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.98.0 h1:bz6HalCAVZkFWE9ar82puapTU+RkhFgSgrowOYZ3kLQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.98.0/go.mod h1:CxSzZAEx2UIAUI6zWVPnbCnbqt0FlY0yuVlwUm1hsJ0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.98.0 h1:F/5pHcrMWucyEIYbUL3xflIwgdT08Sdjg1sbZ3X0dWg= 
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.98.0/go.mod h1:aTMFq8/R+fm86HGJEhzXWjs7sTezh5Ivq5uXCyNBJeE= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= +github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM= -github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= -github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R5M2qXZiK/mWPMT4VldCOiSL9HIAMuxQZWdG0CSM5+4= -github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= -github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux 
v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc= github.com/openshift/api v0.0.0-20180801171038-322a19404e37/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= @@ -979,12 +1162,19 @@ github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pion/dtls/v2 v2.2.6 h1:yXMxKr0Skd+Ub6A8UqXTRLSywskx93ooMRHsQUtd+Z4= +github.com/pion/dtls/v2 v2.2.6/go.mod h1:t8fWJCIquY5rlQZwA2yWxUS1+OCrAdXrhVKXB5oD/wY= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/transport/v2 v2.0.2 h1:St+8o+1PEzPT51O9bv+tH/KYYLMNR5Vwm5Z3Qkjsywg= +github.com/pion/transport/v2 v2.0.2/go.mod h1:vrz6bUbFr/cjdwbnxq8OdDDzHf7JJfGsIRkxfpZoTA0= github.com/pion/udp/v2 v2.0.1 h1:xP0z6WNux1zWEjhC7onRA3EwwSliXqu1ElUZAQhUP54= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pion/udp/v2 v2.0.1/go.mod h1:B7uvTMP00lzWdyMr/1PVZXtV3wpPIxBRd4Wl6AksXn8= +github.com/pkg/browser 
v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1004,28 +1194,22 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1034,19 +1218,19 @@ github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI= -github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= -github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= -github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e h1:UmqAuY2OyDoog8+l5FybViJE5B2r+UxVGCUwFTsY5AA= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e/go.mod h1:+0ld+ozir7zWFcHA2vVpWAKxXakIioEjPPNOqH+J3ZA= github.com/rabbitmq/amqp091-go v1.2.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= 
github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= +github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -1061,20 +1245,27 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 h1:ZFfeKAhIQiiOrQaI3/znw0gOmYpO28Tcu1YaqMa/jtQ= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shirou/gopsutil v3.21.11+incompatible 
h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= +github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= +github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= -github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= +github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 h1:32k2QLgsKhcEs55q4REPKyIadvid5FPy2+VMgvbmKJ0= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3/go.mod h1:gJrXWi7wSGXfiC7+VheQaz+ypdCt5SmZNL+BRxUe7y4= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= -github.com/signalfx/golib/v3 v3.3.43 h1:GvzjE2WaYU3oPhoek52/5zYZ5tPnt05EXUmszSZct+E= -github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= +github.com/signalfx/golib/v3 v3.3.53 h1:gJx0JrrHjidpo2md+YLEN+Ws3RLTaCmKYT575ZSTKUo= +github.com/signalfx/golib/v3 v3.3.53/go.mod h1:lgdUNXjNnZkgEhQFSbfyek4oFUG8t1APhGaitZsSBzc= +github.com/signalfx/sapm-proto v0.12.0 h1:OtOe+Jm8L61Ml8K6X8a89zc8/RlaaMRElCImeGKR/Ew= +github.com/signalfx/sapm-proto v0.12.0/go.mod h1:wQEki8RNCYjkv19jw5aWDcmDMTQru0ckfUbgHI69U2E= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1086,6 +1277,7 @@ 
github.com/sleepinggenius2/gosmi v0.4.4/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bT github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snowflakedb/gosnowflake v1.6.21 h1:OEn5/P+voj3P/STW+R/gGktJlEpfP127GzrxvtAJ5II= +github.com/snowflakedb/gosnowflake v1.6.21/go.mod h1:P2fE/xiD2kQXpr48OdgnazkzPsKD6aVtnHD3WP8yD9c= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1097,13 +1289,15 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1115,9 +1309,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= @@ -1127,7 +1320,9 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= +github.com/tidwall/tinylru v1.1.0/go.mod 
h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= +github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= @@ -1137,33 +1332,52 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twmb/murmur3 v1.1.7 h1:ULWBiM04n/XoN3YMSJ6Z2pHDFLf+MeIVQU71ZPrvbWg= +github.com/twmb/murmur3 v1.1.7/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 h1:JBj2CqnFwBhI3XsdMNn9MjKvehog+p5QZihotqq0Zuo= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= 
+github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36/go.mod h1:AslkIOXnEbVmvzc8uqDjm8ZyIqNJcEPiFRqlokmqr2o= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.27.3 h1:gwHHxKbMTNJON/3WPK3EsqZyQznTdHJAyRYPRSLm6R8= +github.com/vmware/govmomi v0.27.3/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wavefronthq/wavefront-sdk-go v0.9.10 h1:L4eiEdHpudHTGwrJaPYaCXSjCYDex8FEKVqaprU2sY0= github.com/wavefronthq/wavefront-sdk-go v0.9.10/go.mod h1:ACJVXk0ksPHFkkTkXckqKEJGU3YDoAWjxTESkZlp+IE= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= +github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= +github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod 
h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/scram v1.1.0 h1:d70R37I0HrDLsafRrMBXyrD4lmQbCHE873t00Vr0gm0= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/youmark/pkcs8 
v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1171,10 +1385,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1182,7 +1398,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3 go.etcd.io/etcd/client/v2 v2.305.0/go.mod 
h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.mongodb.org/mongo-driver v1.11.0 h1:FZKhBSTydeuffHj9CBjXlR8vQLee1cQyTWYPA6/tqiE= +go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= +go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1191,98 +1408,121 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.89.0 h1:lzpfD9NTHh+1M+qzcoYUH+i2rOgFSox3bGQFUI5BPJg= -go.opentelemetry.io/collector v0.89.0/go.mod h1:UZUtmQ3kai0CLPWvPmHKpmwqqEoo50n1bwzYYhXX0eA= -go.opentelemetry.io/collector/component v0.89.0 h1:PoQJX86BpaSZhzx0deQXHh3QMuW6XKVmolSdTKE506c= -go.opentelemetry.io/collector/component v0.89.0/go.mod h1:ZZncnMVaNs++JIbAMiemUIWLZrZ3PMEzI3S3K8pnkws= -go.opentelemetry.io/collector/config/configauth v0.89.0 h1:F082cy1OwrjyucI0wgEO2lRPTWJlgJzM/I5d0BoVgp4= -go.opentelemetry.io/collector/config/configauth v0.89.0/go.mod h1:yRJj70B3MyfbyGuyKO1I+5LtGuvx/WLUh8kuQ/XX6RE= -go.opentelemetry.io/collector/config/configcompression v0.89.0 h1:Z4LG045HwoNqXaibVbAQkcAQGmvY4OHrY4eCppoAzoQ= -go.opentelemetry.io/collector/config/configcompression v0.89.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= -go.opentelemetry.io/collector/config/configgrpc v0.89.0 h1:Lnv/4EbImss5JuVE2nffk00pCGfk4tRyV8Gm3Gl7yuM= 
-go.opentelemetry.io/collector/config/configgrpc v0.89.0/go.mod h1:2GuAxpU34a1X19kCZ8Kw3FUsxOIiFcWHiYorRCyhnCc= -go.opentelemetry.io/collector/config/confignet v0.89.0 h1:Ekh+XhXelnnJ9as8IWhdUOfjnDPhz12XMJQ8sWNAbGw= -go.opentelemetry.io/collector/config/confignet v0.89.0/go.mod h1:cpO8JYWGONaViOygKVw+Hd2UoBcn2cUiyi0WWeFTwJY= -go.opentelemetry.io/collector/config/configopaque v0.89.0 h1:Ad6yGcGBHs+J9SNjkedY68JsLZ1vBn4kKzdqKuTCRsE= -go.opentelemetry.io/collector/config/configopaque v0.89.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= -go.opentelemetry.io/collector/config/configtelemetry v0.89.0 h1:NtRknYDfMgP1r8mnByo6qQQK8IBw/lF9Qke5f7VhGZ0= -go.opentelemetry.io/collector/config/configtelemetry v0.89.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= -go.opentelemetry.io/collector/config/configtls v0.89.0 h1:XDeUaTU7LYwnEXz/CSdjbCStJa7n0YR1q0QpK0Vtw9w= -go.opentelemetry.io/collector/config/configtls v0.89.0/go.mod h1:NlE4elqXoyFfzQvYfzgH6uOU1zNVa+5tt6EIq52TJ9Y= -go.opentelemetry.io/collector/config/internal v0.89.0 h1:fs7LJTJd1EF76pjK7ZZZMWNxze0+pDXq3mfRwhm0P0g= -go.opentelemetry.io/collector/config/internal v0.89.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= -go.opentelemetry.io/collector/confmap v0.89.0 h1:N5Vg1+FXEFBHHlGIPg4OSlM9uTHjCI7RlWWrKjtOzWQ= -go.opentelemetry.io/collector/confmap v0.89.0/go.mod h1:D8FMPvuihtVxwXaz/qp5q9X2lq9l97QyjfsdZD1spmc= -go.opentelemetry.io/collector/connector v0.89.0 h1:2v7RgaLrlGmvHV1BjG133MswZ2BhH77vTHOLa4/Tqm0= -go.opentelemetry.io/collector/connector v0.89.0/go.mod h1:z82Yw0wZarpva9byeqByb3+eZSZ35DMdNRkjFgMm0S0= -go.opentelemetry.io/collector/consumer v0.89.0 h1:MteKhkudX2L1ylbtdpSazO8SwyHSxl6fUEElc0rRLDQ= -go.opentelemetry.io/collector/consumer v0.89.0/go.mod h1:aOaoi6R0qVvfHu0pEPCzSE74gIPNJoCQM8Ml4Bc9NHE= -go.opentelemetry.io/collector/exporter v0.89.0 h1:8sYpJdKDQ2RuYOPKDsMz/lMJqp4WEWZgfJzyn9IgOag= -go.opentelemetry.io/collector/exporter v0.89.0/go.mod h1:zR8PFXMHlG0qPIEdRPNaefxDNj4UVP47uJ4vbHs+YME= 
-go.opentelemetry.io/collector/exporter/loggingexporter v0.89.0 h1:AdMGaH1p9D1Kvjkd/sXMg/Hkc5M1lqBdhXY5WfmiGSE= -go.opentelemetry.io/collector/exporter/loggingexporter v0.89.0/go.mod h1:N4zI/Zkjz5hx774uM3RdQTIeiw59FMmOdHrpbOK5UvE= -go.opentelemetry.io/collector/extension v0.89.0 h1:iiaWIPPFqP4T0FSgl6+D1xRUhVnhsk88uk2BxCFqt7E= -go.opentelemetry.io/collector/extension v0.89.0/go.mod h1:tBh5wD4AZ3xFO6M1CjkEEx2urexTqcAcgi9cJSPME3E= -go.opentelemetry.io/collector/extension/auth v0.89.0 h1:eo9JoWklZdSManEPLm1LqlwEq5v/YIsOupjZHdRYm3I= -go.opentelemetry.io/collector/extension/auth v0.89.0/go.mod h1:TzC5WYGMgsZvkpYSU1Jlwxh46tSDmWRLFsc9awXaedk= -go.opentelemetry.io/collector/extension/zpagesextension v0.89.0 h1:opvHcGANx+dS4HJwGTPtRMd6dZdOmmGn6c3PfW/bihQ= -go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= -go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector v0.98.0 h1:O7bpARGWzNfFQEYevLl4iigDrpGTJY3vV/kKqNZzMOk= +go.opentelemetry.io/collector v0.98.0/go.mod h1:fvPM+tBML07uvAP1MV2msYPSYJ9U/lgE1jDb3AFBaMM= +go.opentelemetry.io/collector/component v0.98.0 h1:0TMaBOyCdABiVLFdGOgG8zd/1IeGldCinYonbY08xWk= +go.opentelemetry.io/collector/component v0.98.0/go.mod h1:F6zyQLsoExl6r2q6WWZm8rmSSALbwG2zwIHLrMzZVio= +go.opentelemetry.io/collector/config/configauth v0.98.0 h1:FPffZ1dRL6emStrDUEGpL0rCChbUZNAQgpArXD0SESI= +go.opentelemetry.io/collector/config/configauth v0.98.0/go.mod h1:5pMzf2zgFwS7tujNq0AtOOli5vxIvnrNi7JlZwrBOFo= +go.opentelemetry.io/collector/config/configcompression v1.5.0 h1:FTxKbFPN4LznRCH/GQ+b+0tAWmg80Y2eEka79S2sLZ0= +go.opentelemetry.io/collector/config/configcompression v1.5.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M= +go.opentelemetry.io/collector/config/configgrpc v0.98.0 h1:4yP/TphwQnbgLpJ72NymXaERVjLjuDAQp4iDKCTcv5g= +go.opentelemetry.io/collector/config/configgrpc v0.98.0/go.mod 
h1:tIng0xx1XlVr4I0YG5bNpts0hZDjwzN3Jkz6cKaSH/s= +go.opentelemetry.io/collector/config/confighttp v0.98.0 h1:pW7gR34TTXcrCHJgemL6A4VBVBS2NyDAkruSMvQj1Vo= +go.opentelemetry.io/collector/config/confighttp v0.98.0/go.mod h1:M9PMtiKrTJMG8i3SqJ+AUVKhR6sa3G/8S2F1+Dxkkr0= +go.opentelemetry.io/collector/config/confignet v0.98.0 h1:pXDBb2hFe10T/NMHlL/oMgk1aFfe4NmmJFdFoioyC9o= +go.opentelemetry.io/collector/config/confignet v0.98.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k= +go.opentelemetry.io/collector/config/configopaque v1.5.0 h1:WJzgmsFU2v63BypPBNGL31ACwWn6PwumPJNpLZplcdE= +go.opentelemetry.io/collector/config/configopaque v1.5.0/go.mod h1:/otnfj2E8r5EfaAdNV4qHkTclmiBCZXaahV5EcLwT7k= +go.opentelemetry.io/collector/config/configretry v0.98.0 h1:gZRenX9oMLJmQ/CD8YwFNl9YYl68RtcD0RYSCJhrMAk= +go.opentelemetry.io/collector/config/configretry v0.98.0/go.mod h1:uRdmPeCkrW9Zsadh2WEbQ1AGXGYJ02vCfmmT+0g69nY= +go.opentelemetry.io/collector/config/configtelemetry v0.98.0 h1:f8RNZ1l/kYPPoxFmKKvTUli8iON7CMsm85KM38PVNts= +go.opentelemetry.io/collector/config/configtelemetry v0.98.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/config/configtls v0.98.0 h1:g+MADy01ge8iGC6v2tbJ5G27CWNG1BaJtmYdmpvm8e4= +go.opentelemetry.io/collector/config/configtls v0.98.0/go.mod h1:9RHArziz0mNEEkti0kz5LIdvbQGT7/Unu/0whKKazHQ= +go.opentelemetry.io/collector/config/internal v0.98.0 h1:wz/6ncawMX5cfIiXJEYSUm1g1U6iE/VxFRm4/WhVBPI= +go.opentelemetry.io/collector/config/internal v0.98.0/go.mod h1:xPnEE6QaTSXr+ctYMSTBxI2qwTntTUM4cYk7OTm6Ugc= +go.opentelemetry.io/collector/confmap v0.98.0 h1:qQreBlrqio1y7uhrAvr+W86YbQ6fw7StgkbYpvJ2vVc= +go.opentelemetry.io/collector/confmap v0.98.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0 h1:lRhfcLr3gK5S/zn92h3clyOPnCvvNKs1WTMbtH4UvO0= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0/go.mod 
h1:vNMFTWe4dF05LsodUOc84OfxdlYVp1kCMuZzb41WfAk= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0 h1:x/VsGlBj+DtJCXIucwzwcxiwnwAU8a6ALK6UN8fPdKQ= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0/go.mod h1:BapTGXu7CYrQGNohbapPwTSt2Ty/k/c6Oemx9mSSiK4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0 h1:SxDS+Yr8qE+ID58ELR5n0D+SUlqHKOZ72pK3YPFAelA= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0/go.mod h1:DEoB0d0k1iGt4KEABntL8AW9xYQ6E7fmgkM2/s8aXvM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0 h1:C02SPbRPvrtmZ9TvsHWpz2TvHzqY5mNyEAlDdhax/a4= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0/go.mod h1:dzZKtykJio3Rm+G+Cmr15VV3xKp0PmFuh9Q9b3c1K7A= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 h1:04zGXVQZ8D6nvoPX8AaqxWxGHNNVsGR78E+tY+2VQr8= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0/go.mod h1:+UrRiugWaQPssz4mgEgQQo640f2bDUCFlo2Xr0/5ulc= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 h1:JYpDN0OnMsu0awk0rjaYEIko9hFzzBJ6+2U5W2iVvUE= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0/go.mod h1:xrXL804nBum1PgbvmJQ4I+hyW+DU4xBGO3MKMiYFX6E= +go.opentelemetry.io/collector/connector v0.98.0 h1:1ifadXqOtB5bZ+OocLVlzF0zltWjP70E3+xYt2fJnMg= +go.opentelemetry.io/collector/connector v0.98.0/go.mod h1:OFii9qa2ZgktI61/r0gWDsGjXtpEe+qXC8+0o4ZySeA= +go.opentelemetry.io/collector/consumer v0.98.0 h1:47zJ5HFKXVA0RciuwkZnPU5W8j0TYUxToB1/zzzgEhs= +go.opentelemetry.io/collector/consumer v0.98.0/go.mod h1:c2edTq38uVJET/NE6VV7/Qpyznnlz8b6VE7J6TXD57c= +go.opentelemetry.io/collector/exporter v0.98.0 h1:eN2qtkiwpeX9gBu9JZw1k/CZ3N9wZE1aGJ1A0EvwJ7w= +go.opentelemetry.io/collector/exporter v0.98.0/go.mod h1:GCW46a0VAuW7nljlW//GgFXI+8mSrJjrdEKVO9icExE= +go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0 h1:2DNfziYl0w8Sq9bPdYlPpn5MLLQGB73LB7O1BIYQxA4= 
+go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0/go.mod h1:SBuTQ0sA3fEd/jAJFAxjTX8Ndwkc4Mtkc6gsz115S+8= +go.opentelemetry.io/collector/extension v0.98.0 h1:08B5ipEsoNmPHY96j5EUsUrFre01GOZ4zgttUDtPUkY= +go.opentelemetry.io/collector/extension v0.98.0/go.mod h1:fZ1Hnnahszl5j3xcW2sMRJ0FLWDOFkFMQeVDP0Se7i8= +go.opentelemetry.io/collector/extension/auth v0.98.0 h1:7b1jioijJbTMqaOCrz5Hoqf+zJn2iPlGmtN7pXLNWbA= +go.opentelemetry.io/collector/extension/auth v0.98.0/go.mod h1:gssWC4AxAwAEKI2CqS93lhjWffsVdzD8q7UGL6LaRr0= +go.opentelemetry.io/collector/extension/zpagesextension v0.98.0 h1:JfvsDpTwAhA9au8/4vmONRh0OBVU6n36seb41JD/mTQ= +go.opentelemetry.io/collector/extension/zpagesextension v0.98.0/go.mod h1:t1zDwy6kYp4w1JgcGHMvdGbKYHqWpK00bB1AEQ0Oqlc= +go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM= +go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= go.opentelemetry.io/collector/model v0.44.0 h1:I+M6X2NANYChOGYrpbxCoEYJah3eHdMvumKjothIAtA= -go.opentelemetry.io/collector/otelcol v0.89.0 h1:b3kC0zM9T6GxjhTcNEHIzXDd4zTIc7cBpexCPZ9nPzg= -go.opentelemetry.io/collector/otelcol v0.89.0/go.mod h1:BILxDcJe32wK+paX7ssnt4jyjOmTkIHvXe9JNjlHUk8= -go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= -go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= -go.opentelemetry.io/collector/processor v0.89.0 h1:ypMnoOqBYbXgbDnAm9/Cb4uN3kxvmI05Vf6o4u/riBU= -go.opentelemetry.io/collector/processor v0.89.0/go.mod h1:HzMQ2VbxaECk7Oy1mHtug4qsl4acAW4XP1hpTgQKv84= -go.opentelemetry.io/collector/processor/batchprocessor v0.89.0 h1:oTEOHs2t00f9LfRwZGzKSXJPR3FHjPDDhjt05iWXYZM= -go.opentelemetry.io/collector/processor/batchprocessor v0.89.0/go.mod h1:xaTfwrIbnsUpvaUCIecvlH8wTH500U7LtczdaEPXmmk= -go.opentelemetry.io/collector/receiver v0.89.0 h1:wC/FB8e2Ej06jjNW2OiuZoyiSyB8TQNIzYyPlh9oRqI= 
-go.opentelemetry.io/collector/receiver v0.89.0/go.mod h1:Rk7Bkz45fVdrcJaVDsPTnHa97ZfSs1ULO76LXc4kLN0= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.89.0 h1:ayqzWyj56xyc0tcBpaYjWtxvj5pfZNhYC8LrwvlLAeE= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.89.0/go.mod h1:7p2oZ09hP+h6WpTv67PgEJx+azViVqLBE4IfrnDOWOc= -go.opentelemetry.io/collector/semconv v0.89.0 h1:Sw+MiI3/oiYIY+ebkanZsOaBxXMx3sqnH1/6NaD4rLQ= -go.opentelemetry.io/collector/semconv v0.89.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/collector/service v0.89.0 h1:dZSK++Eu2H/vzWjhVG3yC0P76Xu5xE6qji0dMy0fRTw= -go.opentelemetry.io/collector/service v0.89.0/go.mod h1:6IAr9Asn6t6YxSw6Qv5LwA+ilVUtP0nQsu1xzc9+mZA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= -go.opentelemetry.io/contrib/propagators/b3 v1.21.1 h1:WPYiUgmw3+b7b3sQ1bFBFAf0q+Di9dvNc3AtYfnT4RQ= -go.opentelemetry.io/contrib/propagators/b3 v1.21.1/go.mod h1:EmzokPoSqsYMBVK4nRnhsfm5mbn8J1eDuz/U1UaQaWg= -go.opentelemetry.io/contrib/zpages v0.45.0 h1:jIwHHGoWzJoZdbIUtWdErjL85Gni6BignnAFqDtMRL4= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/bridge/opencensus v0.44.0 h1:/inELPJztkn6Xx3ap9qw8i8XdeWF0B/OjGHOdRTePZ8= -go.opentelemetry.io/otel/bridge/opencensus v0.44.0/go.mod h1:dQTBJVBx1xahrXEFBV1BGPAnGuXC92LCj55fxIrtj7I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 
h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 h1:dEZWPjVN22urgYCza3PXRUGEyCB++y1sAqm6guWFesk= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod 
h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= -go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= +go.opentelemetry.io/collector/otelcol v0.98.0 h1:sUk49Wqw+VBYeDynEA+GSeVEusFvzFW3KuF2vfDbyo0= +go.opentelemetry.io/collector/otelcol v0.98.0/go.mod h1:dW3UzuaiaNTddjKajk3Tp2Y7muDvYJdQz2yGUOE53gs= +go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE= +go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw= +go.opentelemetry.io/collector/pdata/testdata v0.98.0 h1:8gohV+LFXqMzuDwfOOQy9GcZBOX0C9xGoQkoeXFTzmI= +go.opentelemetry.io/collector/pdata/testdata v0.98.0/go.mod h1:B/IaHcf6+RtxI292CZu9TjfYQdi1n4+v6b8rHEonpKs= +go.opentelemetry.io/collector/processor v0.98.0 h1:onrg8a99lToytbHF148Bg9a7DfNk31B+p6UHouiiVTw= +go.opentelemetry.io/collector/processor v0.98.0/go.mod h1:QxgzjmJI12DQWN0LIHmZBOR7HRzPuVWFW4oqTdrS1ho= +go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 h1:iM4fMLGig3GKmz5XNtOPKDsnCnvbi0+UHYaWsx/aSRc= +go.opentelemetry.io/collector/processor/batchprocessor v0.98.0/go.mod h1:ROnuUkZJgpKEIDf3AIVjgRGNI7KPqCKPXsw8whL6Hzs= +go.opentelemetry.io/collector/receiver v0.98.0 h1:qw6JYwm+sHcZvM1DByo3QlGe6yGHuwd0yW4hEPVqYKU= +go.opentelemetry.io/collector/receiver v0.98.0/go.mod h1:AwIWn+KnquTR+kbhXQrMH+i2PvTCFldSIJznBWFYs0s= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 
h1:j7lfLwc5o1dtXIPXU8LjmxadejmJVRHN57ZYGH33Wq4= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0/go.mod h1:uWDBDxaWuzF1U5S2UIhstO0+Q8aUiwiUu8uO1IYN2XQ= +go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= +go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/collector/service v0.98.0 h1:lLJ7VXPXcV62fSISh4GuNd5ti6WvKje76NSgezc3ydo= +go.opentelemetry.io/collector/service v0.98.0/go.mod h1:wB7ozvZTHtMefb5KTYy5nyrVYWpGk8teq8jWFs4blIU= +go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs= +go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfFmiWAUF9GtJdcLz+BU= +go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA= +go.opentelemetry.io/contrib/zpages v0.50.0 h1:hKC5asr83xDN4ErwSHVdk3gv053pZiF8SZKmS86IPEw= +go.opentelemetry.io/contrib/zpages v0.50.0/go.mod h1:8WovRn95fZdaX/dr3e4h7D8IqiVsnZ+WxY0Yn4LyU3k= +go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= +go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= +go.opentelemetry.io/otel/bridge/opencensus v1.25.0 h1:0o/9KwAgxjK+3pMV0pwIF5toYHqDsPmQhfrBvKaG6mU= +go.opentelemetry.io/otel/bridge/opencensus v1.25.0/go.mod 
h1:rZyTdpmRqoV+PpUn6QlruxJp/kE4765rPy0pP6mRDk8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 h1:hDKnobznDpcdTlNzO0S/owRB8tyVr1OoeZZhDoqY+Cs= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0/go.mod h1:kUDQaUs1h8iTIHbQTk+iJRiUvSfJYMMKTtMCaiVu7B0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 h1:Wc4hZuYXhVqq+TfRXLXlmNIL/awOanGx8ssq3ciDQxc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0/go.mod h1:BydOvapRqVEc0DVz27qWBX2jq45Ca5TI9mhZBDIdweY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 h1:vOL89uRfOCCNIjkisd0r7SEdJF3ZJFyCNY34fdZs8eU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0/go.mod h1:8GlBGcDk8KKi7n+2S4BT/CPZQYH3erLu0/k64r1MYgo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= +go.opentelemetry.io/otel/exporters/prometheus v0.47.0 h1:OL6yk1Z/pEGdDnrBbxSsH+t4FY1zXfBRGd7bjwhlMLU= +go.opentelemetry.io/otel/exporters/prometheus v0.47.0/go.mod h1:xF3N4OSICZDVbbYZydz9MHFro1RjmkPUKEvar2utG+Q= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 h1:d7nHbdzU84STOiszaOxQ3kw5IwkSmHsU5Muol5/vL4I= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0/go.mod h1:yiPA1iZbb/EHYnODXOxvtKuB0I2hV8ehfLTEWpl7BJU= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0 h1:0vZZdECYzhTt9MKQZ5qQ0V+J3MFu4MQaQ3COfugF+FQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0/go.mod h1:e7iXx3HjaSSBXfy9ykVUlupS2Vp7LBIBuT21ousM2Hk= +go.opentelemetry.io/otel/metric v1.25.0 
h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= +go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= +go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= +go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw= +go.opentelemetry.io/otel/sdk/metric v1.25.0 h1:7CiHOy08LbrxMAp4vWpbiPcklunUshVpAvGBrdDRlGw= +go.opentelemetry.io/otel/sdk/metric v1.25.0/go.mod h1:LzwoKptdbBBdYfvtGCzGwk6GWMA3aUzBOwtQpR6Nz7o= +go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= +go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -1296,8 +1536,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod 
h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1316,8 +1556,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1331,8 +1571,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= -golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod 
h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1359,8 +1599,8 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1403,25 +1643,21 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= 
-golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1433,11 +1669,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1466,7 +1701,6 @@ golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1506,15 +1740,11 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1525,8 +1755,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1534,8 +1764,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod 
h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1546,6 +1776,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -1556,8 +1787,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= -golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1616,17 +1848,18 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod 
h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1645,16 +1878,16 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= -google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY= +google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 
h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1688,12 +1921,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 
h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1713,8 +1946,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1729,10 +1962,8 @@ google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX7 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1743,10 +1974,12 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= +gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -1754,10 +1987,12 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/olivere/elastic.v5 v5.0.86 h1:xFy6qRCGAmo5Wjx96srho9BitLhZl2fcnpuidPwduXM= +gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1783,40 +2018,50 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= k8s.io/apimachinery 
v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi 
v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubelet v0.30.0 h1:/pqHVR2Rn8ExCpn211wL3pMtqRFpcBcJPl4+1INbIMk= +k8s.io/kubelet v0.30.0/go.mod h1:WukdKqbQxnj+csn3K8XOKeX7Sh60J/da25IILjvvB5s= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= -k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= -modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/libc v1.22.4 h1:wymSbZb0AlrjdAVX3cjreCHTPCpPARbQXNz6BHPzdwQ= +modernc.org/libc v1.22.4/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= 
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= -modernc.org/sqlite v1.18.2 h1:S2uFiaNPd/vTAP/4EmyY8Qe2Quzu26A2L1e25xRNTio= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.21.2 h1:ixuUG0QS413Vfzyx6FWx6PYTmHaOegTY+hjzhn7L+a0= +modernc.org/sqlite v1.21.2/go.mod h1:cxbLkB5WS32DnQqeH4h4o1B0eMr8W/y8/RGuxQ3JsC0= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -1825,8 +2070,8 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod 
h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/internal/cloudwatch/unit.go b/internal/cloudwatch/unit.go index b215cfe211..829ac604ff 100644 --- a/internal/cloudwatch/unit.go +++ b/internal/cloudwatch/unit.go @@ -5,12 +5,11 @@ package cloudwatch import ( "fmt" + "strings" "time" - "unicode" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" - "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/internal/util/unit" ) @@ -26,18 +25,18 @@ var baseUnits = map[string]types.StandardUnit{ "us": types.StandardUnitMicroseconds, "ms": types.StandardUnitMilliseconds, // bytes - "B": types.StandardUnitBytes, - "By": types.StandardUnitBytes, - "Bi": types.StandardUnitBits, + "b": types.StandardUnitBytes, + "by": types.StandardUnitBytes, + "bi": types.StandardUnitBits, // rates - "B/s": types.StandardUnitBytesSecond, - "By/s": types.StandardUnitBytesSecond, - "Bi/s": types.StandardUnitBitsSecond, + "b/s": types.StandardUnitBytesSecond, + "by/s": types.StandardUnitBytesSecond, + "bi/s": types.StandardUnitBitsSecond, } var uniqueConversions = map[string]struct { - unit types.StandardUnit - scale float64 + standardUnit types.StandardUnit + scale float64 }{ // time "ns": {types.StandardUnitMicroseconds, 1 / float64(time.Microsecond.Nanoseconds())}, @@ -76,72 +75,81 @@ var scaledBaseUnits = map[types.StandardUnit]map[unit.MetricPrefix]types.Standar // ToStandardUnit converts from the OTEL unit names to the corresponding names // supported by AWS CloudWatch. Some OTEL unit types are unsupported. 
func ToStandardUnit(unit string) (string, float64, error) { - if IsStandardUnit(unit) { - return unit, 1, nil + standardUnit, scale, err := toStandardUnit(unit) + return string(standardUnit), scale, err +} + +func toStandardUnit(unit string) (types.StandardUnit, float64, error) { + u := strings.ToLower(unit) + if standardUnit, ok := standardUnits[u]; ok { + return standardUnit, 1, nil } - if baseUnit, ok := baseUnits[unit]; ok { - return string(baseUnit), 1, nil + if standardUnit, ok := baseUnits[u]; ok { + return standardUnit, 1, nil } - if conversion, ok := uniqueConversions[unit]; ok { - return string(conversion.unit), conversion.scale, nil + if conversion, ok := uniqueConversions[u]; ok { + return conversion.standardUnit, conversion.scale, nil } - prefix, base := splitUnit(unit) - if baseUnit, ok := baseUnits[base]; ok { - return scaleBaseUnit(prefix, baseUnit) + prefix, baseUnit := splitUnit(u) + if standardUnit, ok := baseUnits[baseUnit]; ok && prefix != nil { + return scaleBaseUnit(prefix, standardUnit) } - return string(types.StandardUnitNone), 1, fmt.Errorf("non-convertible unit: %q", unit) + return types.StandardUnitNone, 1, fmt.Errorf("non-convertible unit: %q", unit) } -// splitUnit splits a unit and its prefix based on the second capital letter found. +// splitUnit splits a unit and its prefix based on available prefixes. // e.g. MiBy will split into prefix "Mi" and base "By". 
-func splitUnit(unit string) (string, string) { - var index int - if len(unit) > 1 { - for i, r := range unit[1:] { - if unicode.IsUpper(r) { - index = i + 1 - break - } +func splitUnit(unit string) (unit.Prefix, string) { + for _, prefix := range supportedPrefixes { + p := strings.ToLower(prefix.String()) + baseUnit, ok := strings.CutPrefix(unit, p) + if ok { + return prefix, baseUnit } } - return unit[:index], unit[index:] + return nil, unit } -// scaleBaseUnit takes a prefix and the CloudWatch base unit and finds the scaled CloudWatch unit and +// scaleBaseUnit takes a prefix and the CloudWatch standard unit and finds the scaled CloudWatch unit and // the scale factor if value adjustments are necessary. -func scaleBaseUnit(prefix string, baseUnit types.StandardUnit) (string, float64, error) { - scaledUnits, ok := scaledBaseUnits[baseUnit] +func scaleBaseUnit(prefix unit.Prefix, standardUnit types.StandardUnit) (types.StandardUnit, float64, error) { + scaledUnits, ok := scaledBaseUnits[standardUnit] if !ok { - return string(types.StandardUnitNone), 1, fmt.Errorf("non-scalable unit: %v", baseUnit) + return types.StandardUnitNone, 1, fmt.Errorf("non-scalable unit: %v", standardUnit) } + var metricPrefix unit.MetricPrefix scale := float64(1) - metricPrefix := unit.MetricPrefix(prefix) - if metricPrefix.Value() == -1 { + switch p := prefix.(type) { + case unit.MetricPrefix: + metricPrefix = p + case unit.BinaryPrefix: var err error - metricPrefix, scale, err = unit.ConvertToMetric(unit.BinaryPrefix(prefix)) + metricPrefix, scale, err = unit.ConvertToMetric(p) if err != nil { - return string(types.StandardUnitNone), 1, fmt.Errorf("unsupported prefix: %v", prefix) + return types.StandardUnitNone, 1, err } + default: + return types.StandardUnitNone, 1, fmt.Errorf("unsupported prefix: %v", prefix) } if scaledUnit, ok := scaledUnits[metricPrefix]; ok { - return string(scaledUnit), scale, nil + return scaledUnit, scale, nil } - return string(types.StandardUnitNone), 1, 
fmt.Errorf("unsupported prefix %v for %v", prefix, baseUnit) + return types.StandardUnitNone, 1, fmt.Errorf("unsupported prefix %v for %v", prefix, standardUnit) } -var standardUnits = collections.NewSet[string]() - -// IsStandardUnit determines if the unit is acceptable by CloudWatch. -func IsStandardUnit(unit string) bool { - if unit == "" { - return false - } - _, ok := standardUnits[unit] - return ok -} +var ( + standardUnits = make(map[string]types.StandardUnit) + supportedPrefixes []unit.Prefix +) func init() { for _, standardUnit := range types.StandardUnitNone.Values() { - standardUnits.Add(string(standardUnit)) + standardUnits[strings.ToLower(string(standardUnit))] = standardUnit + } + for _, binaryPrefix := range unit.BinaryPrefixes { + supportedPrefixes = append(supportedPrefixes, binaryPrefix) + } + for _, metricPrefix := range unit.MetricPrefixes { + supportedPrefixes = append(supportedPrefixes, metricPrefix) } } diff --git a/internal/cloudwatch/unit_test.go b/internal/cloudwatch/unit_test.go index c4f312ff8d..20ec570f9a 100644 --- a/internal/cloudwatch/unit_test.go +++ b/internal/cloudwatch/unit_test.go @@ -4,21 +4,21 @@ package cloudwatch import ( - "math" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSimpleUnit(t *testing.T) { - // Each element in the slice has the input and expectedOutput. - cases := [][2]string{ + // Each element in the slice has the input and expected output. 
+ testCases := [][2]string{ {"", "None"}, {"1", "None"}, {"B", "Bytes"}, + {"By", "Bytes"}, + {"by", "Bytes"}, {"B/s", "Bytes/Second"}, - {"By/s", "Bytes/Second"}, + {"BY/S", "Bytes/Second"}, {"Bi/s", "Bits/Second"}, {"Bi", "Bits"}, {"None", "None"}, @@ -26,15 +26,15 @@ func TestSimpleUnit(t *testing.T) { {"%", "Percent"}, } - for _, c := range cases { - a, s, err := ToStandardUnit(c[0]) + for _, testCase := range testCases { + unit, scale, err := ToStandardUnit(testCase[0]) assert.NoError(t, err) - assert.Equal(t, c[1], a) - assert.EqualValues(t, 1, s) + assert.Equal(t, testCase[1], unit) + assert.EqualValues(t, 1, scale) } } -// If the unit cannot be converted then use None. +// If the unit cannot be converted then use None and return an error. func TestUnsupportedUnit(t *testing.T) { testCases := []string{"banana", "ks"} for _, testCase := range testCases { @@ -47,20 +47,24 @@ func TestUnsupportedUnit(t *testing.T) { func TestScaledUnits(t *testing.T) { testCases := []struct { - input string - unit string - scale float64 - epsilon float64 + input string + unit string + scale float64 }{ - {"MiBy", "Megabytes", 1.049, 0.001}, - {"kB", "Kilobytes", 1, 0}, - {"min", "Seconds", 60, 0}, - {"ns", "Microseconds", 0.001, 0}, + {"MiBy", "Megabytes", 1.048576}, + {"mby", "Megabytes", 1}, + {"kB", "Kilobytes", 1}, + {"kib/s", "Kilobytes/Second", 1.024}, + {"ms", "Milliseconds", 1}, + {"ns", "Microseconds", 0.001}, + {"min", "Seconds", 60}, + {"h", "Seconds", 60 * 60}, + {"d", "Seconds", 24 * 60 * 60}, } for _, testCase := range testCases { unit, scale, err := ToStandardUnit(testCase.input) - require.NoError(t, err) + assert.NoError(t, err) assert.Equal(t, testCase.unit, unit) - assert.GreaterOrEqual(t, testCase.epsilon, math.Abs(testCase.scale-scale)) + assert.Equal(t, testCase.scale, scale) } } diff --git a/internal/containerinsightscommon/const.go b/internal/containerinsightscommon/const.go index 0b9dc01672..6a0a44706f 100644 --- 
a/internal/containerinsightscommon/const.go +++ b/internal/containerinsightscommon/const.go @@ -4,15 +4,19 @@ package containerinsightscommon const ( - InstanceId = "InstanceId" GoPSUtilProcDirEnv = "HOST_PROC" - MinTimeDiff = 50 * 1000 // We assume 50 micro-seconds is the minimal gap between two collected data sample to be valid to calculate delta - ClusterNameKey = "ClusterName" - NodeNameKey = "NodeName" + MinTimeDiff = 50 * 1000 // We assume 50 micro-seconds is the minimal gap between two collected data sample to be valid to calculate delta - MetricType = "Type" - SourcesKey = "Sources" + ClusterNameKey = "ClusterName" + NodeNameKey = "NodeName" // Attribute names + InstanceIdKey = "InstanceId" + InstanceTypeKey = "InstanceType" + AutoScalingGroupNameKey = "AutoScalingGroupName" + VersionKey = "Version" + MetricType = "Type" + SourcesKey = "Sources" + GpuDeviceKey = "GpuDevice" // metric collected CpuTotal = "cpu_usage_total" @@ -72,6 +76,32 @@ const ( DiskIOWrite = "Write" DiskIOTotal = "Total" + GpuUtilization = "gpu_utilization" + GpuMemUtilization = "gpu_memory_utilization" + GpuMemUsed = "gpu_memory_used" + GpuMemTotal = "gpu_memory_total" + GpuTemperature = "gpu_temperature" + GpuPowerDraw = "gpu_power_draw" + GpuRequest = "gpu_request" + GpuLimit = "gpu_limit" + GpuTotal = "gpu_total" + GpuUniqueId = "UUID" + + NeuronCoreUtilization = "neuroncore_utilization" + NeuronCoreMemoryUtilizationTotal = "neuroncore_memory_usage_total" + NeuronCoreMemoryUtilizationConstants = "neuroncore_memory_usage_constants" + NeuronCoreMemoryUtilizationModelCode = "neuroncore_memory_usage_model_code" + NeuronCoreMemoryUtilizationSharedScratchpad = "neuroncore_memory_usage_model_shared_scratchpad" + NeuronCoreMemoryUtilizationRuntimeMemory = "neuroncore_memory_usage_runtime_memory" + NeuronCoreMemoryUtilizationTensors = "neuroncore_memory_usage_tensors" + NeuronDeviceHardwareEccEvents = "neurondevice_hw_ecc_events" + NeuronExecutionStatus = "neuron_execution_status" + 
NeuronExecutionErrors = "neuron_execution_errors" + NeuronRuntimeMemoryUsage = "neurondevice_runtime_memory_used_bytes" + NeuronInstanceInfo = "instance_info" + NeuronHardware = "neuron_hardware" + NeuronExecutionLatency = "neuron_execution_latency" + TypeCluster = "Cluster" TypeClusterService = "ClusterService" TypeClusterNamespace = "ClusterNamespace" @@ -86,6 +116,10 @@ const ( TypeNodeNet = "NodeNet" TypeInstanceDiskIO = "InstanceDiskIO" TypeNodeDiskIO = "NodeDiskIO" + TypeGpuContainer = "ContainerGPU" + TypeGpuPod = "PodGPU" + TypeGpuNode = "NodeGPU" + TypeGpuCluster = "ClusterGPU" TypePod = "Pod" TypePodNet = "PodNet" diff --git a/internal/containerinsightscommon/k8sconst.go b/internal/containerinsightscommon/k8sconst.go index b975824d49..c8423665fa 100644 --- a/internal/containerinsightscommon/k8sconst.go +++ b/internal/containerinsightscommon/k8sconst.go @@ -12,11 +12,14 @@ const ( Kubernetes = "kubernetes" K8sNamespace = "Namespace" PodIdKey = "PodId" + FullPodNameKey = "FullPodName" PodNameKey = "PodName" K8sPodNameKey = "K8sPodName" ContainerNamekey = "ContainerName" ContainerIdkey = "ContainerId" PodOwnersKey = "PodOwners" + HostKey = "host" + K8sKey = "kubernetes" RunningPodCount = "number_of_running_pods" RunningContainerCount = "number_of_running_containers" diff --git a/internal/containerinsightscommon/util.go b/internal/containerinsightscommon/util.go index aa5d7c3e68..352bbcb0a8 100644 --- a/internal/containerinsightscommon/util.go +++ b/internal/containerinsightscommon/util.go @@ -34,35 +34,23 @@ func MetricName(mType string, name string) string { namespace := "namespace_" switch mType { - case TypeInstance: - prefix = instancePrefix - case TypeInstanceFS: - prefix = instancePrefix - case TypeInstanceDiskIO: + case TypeInstance, TypeInstanceFS, TypeInstanceDiskIO: prefix = instancePrefix case TypeInstanceNet: prefix = instanceNetPrefix - case TypeNode: - prefix = nodePrefix - case TypeNodeFS: - prefix = nodePrefix - case TypeNodeDiskIO: + case 
TypeNode, TypeNodeFS, TypeNodeDiskIO, TypeGpuNode: prefix = nodePrefix case TypeNodeNet: prefix = nodeNetPrefix - case TypePod: + case TypePod, TypeGpuPod: prefix = podPrefix case TypePodNet: prefix = podNetPrefix - case TypeContainer: - prefix = containerPrefix - case TypeContainerDiskIO: - prefix = containerPrefix - case TypeContainerFS: + case TypeContainer, TypeContainerDiskIO, TypeContainerFS, TypeGpuContainer: prefix = containerPrefix case TypeService: prefix = service - case TypeCluster: + case TypeCluster, TypeGpuCluster: prefix = cluster case K8sNamespace: prefix = namespace diff --git a/internal/exec.go b/internal/exec.go new file mode 100644 index 0000000000..9d3ee34747 --- /dev/null +++ b/internal/exec.go @@ -0,0 +1,53 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package internal + +import ( + "bytes" + "errors" + "os/exec" + "time" +) + +var ( + ErrTimeout = errors.New("command timed out") + ErrNotImplemented = errors.New("not implemented yet") +) + +// CombinedOutputTimeout runs the given command with the given timeout and +// returns the combined output of stdout and stderr. +// If the command times out, it attempts to kill the process. +func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + if err := c.Start(); err != nil { + return nil, err + } + err := WaitTimeout(c, timeout) + return b.Bytes(), err +} + +// StdOutputTimeout runs the given command with the given timeout and +// returns the output of stdout. +// If the command times out, it attempts to kill the process. +func StdOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { + var b bytes.Buffer + c.Stdout = &b + c.Stderr = nil + if err := c.Start(); err != nil { + return nil, err + } + err := WaitTimeout(c, timeout) + return b.Bytes(), err +} + +// RunTimeout runs the given command with the given timeout. 
+// If the command times out, it attempts to kill the process. +func RunTimeout(c *exec.Cmd, timeout time.Duration) error { + if err := c.Start(); err != nil { + return err + } + return WaitTimeout(c, timeout) +} diff --git a/internal/exec_unix.go b/internal/exec_unix.go new file mode 100644 index 0000000000..7f5fb87988 --- /dev/null +++ b/internal/exec_unix.go @@ -0,0 +1,69 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build !windows + +package internal + +import ( + "log" + "os/exec" + "syscall" + "time" +) + +// KillGrace is the amount of time we allow a process to shutdown before +// sending a SIGKILL. +const KillGrace = 5 * time.Second + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + var kill *time.Timer + term := time.AfterFunc(timeout, func() { + err := syscall.Kill(-c.Process.Pid, syscall.SIGTERM) + if err != nil { + log.Printf("E! [agent] Error terminating process children: %s", err) + } + err = c.Process.Signal(syscall.SIGTERM) + if err != nil { + log.Printf("E! [agent] Error terminating process: %s", err) + return + } + + kill = time.AfterFunc(KillGrace, func() { + err := syscall.Kill(-c.Process.Pid, syscall.SIGKILL) + if err != nil { + log.Printf("E! [agent] Error terminating process children: %s", err) + } + err = c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + }) + + err := c.Wait() + + // Shutdown all timers + if kill != nil { + kill.Stop() + } + termSent := !term.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. + if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. 
+ if termSent { + return ErrTimeout + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/exec_windows.go b/internal/exec_windows.go new file mode 100644 index 0000000000..4ff0103036 --- /dev/null +++ b/internal/exec_windows.go @@ -0,0 +1,44 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build windows + +package internal + +import ( + "log" + "os/exec" + "time" +) + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + timer := time.AfterFunc(timeout, func() { + err := c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + + err := c.Wait() + + // Shutdown all timers + termSent := !timer.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. + if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. + if termSent { + return ErrTimeout + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/mapstructure/encoder.go b/internal/mapstructure/encoder.go new file mode 100644 index 0000000000..72efafde15 --- /dev/null +++ b/internal/mapstructure/encoder.go @@ -0,0 +1,286 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/mitchellh/mapstructure" +) + +const ( + tagNameMapStructure = "mapstructure" + optionSeparator = "," + optionOmitEmpty = "omitempty" + optionSquash = "squash" + optionRemain = "remain" + optionSkip = "-" +) + +var ( + errNonStringEncodedKey = errors.New("non string-encoded key") + errUnsupportedKind = errors.New("unsupported kind") +) + +// tagInfo stores the mapstructure tag details. +type tagInfo struct { + name string + omitEmpty bool + squash bool +} + +// An Encoder takes structured data and converts it into an +// interface following the mapstructure tags. +type Encoder struct { + config *EncoderConfig +} + +// EncoderConfig is the configuration used to create a new encoder. +type EncoderConfig struct { + // EncodeHook, if set, is a way to provide custom encoding. It + // will be called before structs and primitive types. + EncodeHook mapstructure.DecodeHookFunc + // NilEmptyMap, if set, is a way to nil out empty maps. + NilEmptyMap bool + // OmitNilFields, if set, is a way to omit all nil struct fields even if omitempty isn't present. + OmitNilFields bool +} + +// New returns a new encoder for the configuration. +func New(cfg *EncoderConfig) *Encoder { + return &Encoder{config: cfg} +} + +// Encode takes the input and uses reflection to encode it to +// an interface based on the mapstructure spec. +func (e *Encoder) Encode(input any) (any, error) { + return e.encode(reflect.ValueOf(input)) +} + +// encode processes the value based on the reflect.Kind. 
+func (e *Encoder) encode(value reflect.Value) (any, error) { + if value.IsValid() { + switch value.Kind() { + case reflect.Interface, reflect.Pointer: + return e.encode(value.Elem()) + case reflect.Map: + return e.encodeMap(value) + case reflect.Slice, reflect.Array: + return e.encodeSlice(value) + case reflect.Struct: + return e.encodeStruct(value) + default: + return e.encodeHook(value) + } + } + return nil, nil +} + +// encodeHook calls the EncodeHook in the EncoderConfig with the value passed in. +// This is called before processing structs and for primitive data types. +func (e *Encoder) encodeHook(value reflect.Value) (any, error) { + if e.config != nil && e.config.EncodeHook != nil { + out, err := mapstructure.DecodeHookExec(e.config.EncodeHook, value, value) + if err != nil { + return nil, fmt.Errorf("error running encode hook: %w", err) + } + return out, nil + } + return value.Interface(), nil +} + +// encodeStruct encodes the struct by iterating over the fields, getting the +// mapstructure tagInfo for each exported field, and encoding the value. +func (e *Encoder) encodeStruct(value reflect.Value) (any, error) { + if value.Kind() != reflect.Struct { + return nil, &reflect.ValueError{ + Method: "encodeStruct", + Kind: value.Kind(), + } + } + out, err := e.encodeHook(value) + if err != nil { + return nil, err + } + value = reflect.ValueOf(out) + // if the output of encodeHook is no longer a struct, + // call encode against it. 
+ if value.Kind() != reflect.Struct { + return e.encode(value) + } + result := make(map[string]any) + for i := 0; i < value.NumField(); i++ { + field := value.Field(i) + if field.CanInterface() { + info := getTagInfo(value.Type().Field(i)) + if (info.omitEmpty && field.IsZero()) || info.name == optionSkip { + continue + } + encoded, err := e.encode(field) + if err != nil { + if errors.Is(err, errUnsupportedKind) { + continue + } + return nil, fmt.Errorf("error encoding field %q: %w", info.name, err) + } + if e.config.OmitNilFields && encoded == nil { + continue + } + if info.squash { + if m, ok := encoded.(map[string]any); ok { + for k, v := range m { + result[k] = v + } + } + } else { + result[info.name] = encoded + } + } + } + return result, nil +} + +// encodeSlice iterates over the slice and encodes each of the elements. +func (e *Encoder) encodeSlice(value reflect.Value) (any, error) { + if value.Kind() != reflect.Slice && value.Kind() != reflect.Array { + return nil, &reflect.ValueError{ + Method: "encodeSlice", + Kind: value.Kind(), + } + } + if value.Kind() == reflect.Slice && value.IsNil() { + return nil, nil + } + result := make([]any, value.Len()) + for i := 0; i < value.Len(); i++ { + var err error + if result[i], err = e.encode(value.Index(i)); err != nil { + return nil, fmt.Errorf("error encoding element in slice at index %d: %w", i, err) + } + } + return result, nil +} + +// encodeMap encodes a map by encoding the key and value. Returns errNonStringEncodedKey +// if the key is not encoded into a string. 
+func (e *Encoder) encodeMap(value reflect.Value) (any, error) { + if value.Kind() != reflect.Map { + return nil, &reflect.ValueError{ + Method: "encodeMap", + Kind: value.Kind(), + } + } + if value.IsNil() { + return nil, nil + } + result := make(map[string]any) + iterator := value.MapRange() + for iterator.Next() { + encoded, err := e.encode(iterator.Key()) + if err != nil { + return nil, fmt.Errorf("error encoding key: %w", err) + } + key, ok := encoded.(string) + if !ok { + return nil, fmt.Errorf("%w key %q, kind: %v", errNonStringEncodedKey, iterator.Key().Interface(), iterator.Key().Kind()) + } + if _, ok = result[key]; ok { + return nil, fmt.Errorf("duplicate key %q while encoding", key) + } + if result[key], err = e.encode(iterator.Value()); err != nil { + return nil, fmt.Errorf("error encoding map value for key %q: %w", key, err) + } + } + if e.config.NilEmptyMap && len(result) == 0 { + return nil, nil + } + return result, nil +} + +// getTagInfo looks up the mapstructure tag and uses that if available. +// Uses the lowercase field if not found. Checks for omitempty and squash. +func getTagInfo(field reflect.StructField) *tagInfo { + info := tagInfo{} + if tag, ok := field.Tag.Lookup(tagNameMapStructure); ok { + options := strings.Split(tag, optionSeparator) + info.name = options[0] + if len(options) > 1 { + for _, option := range options[1:] { + switch option { + case optionOmitEmpty: + info.omitEmpty = true + case optionSquash, optionRemain: + info.squash = true + } + } + } + } else { + info.name = strings.ToLower(field.Name) + } + return &info +} + +// TextMarshalerHookFunc returns a DecodeHookFuncValue that checks +// for the encoding.TextMarshaler interface and calls the MarshalText +// function if found. 
+func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if !from.IsValid() { + return nil, nil + } + marshaler, ok := from.Interface().(encoding.TextMarshaler) + if !ok { + return from.Interface(), nil + } + out, err := marshaler.MarshalText() + if err != nil { + return nil, err + } + return string(out), nil + } +} + +// NilHookFunc returns a DecodeHookFuncValue that checks if the value matches the type and nils it out. Allows specific +// types to be omitted. +func NilHookFunc[T any]() mapstructure.DecodeHookFuncValue { + return nilHookFunc[T](false) +} + +// NilZeroValueHookFunc returns a DecodeHookFuncValue that only nils the field if it's a zero value. +func NilZeroValueHookFunc[T any]() mapstructure.DecodeHookFuncValue { + return nilHookFunc[T](true) +} + +func nilHookFunc[T any](onlyIfZero bool) mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if !from.IsValid() { + return nil, nil + } + _, ok := from.Interface().(T) + if ok && (!onlyIfZero || from.IsZero()) { + return nil, nil + } + return from.Interface(), nil + } +} + +// UnsupportedKindHookFunc returns a DecodeHookFuncValue that checks that the kind isn't one unsupported by the YAML +// encoder. +func UnsupportedKindHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if !from.IsValid() { + return nil, nil + } + switch from.Kind() { + case reflect.Chan, reflect.Func: + return nil, fmt.Errorf("%w: %s", errUnsupportedKind, from.Kind()) + default: + return from.Interface(), nil + } + } +} diff --git a/internal/mapstructure/encoder_test.go b/internal/mapstructure/encoder_test.go new file mode 100644 index 0000000000..af7374469e --- /dev/null +++ b/internal/mapstructure/encoder_test.go @@ -0,0 +1,416 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package mapstructure + +import ( + "encoding" + "errors" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +type TestComplexStruct struct { + Skipped TestEmptyStruct `mapstructure:",squash"` + Nested TestSimpleStruct `mapstructure:",squash"` + Slice []TestSimpleStruct `mapstructure:"slice"` + Array [2]string `mapstructure:"array,omitempty"` + Pointer *TestSimpleStruct `mapstructure:"ptr"` + Map map[string]TestSimpleStruct `mapstructure:"map"` + Remain map[string]any `mapstructure:",remain"` + Interface encoding.TextMarshaler + Function func() string + Channel chan string +} + +type TestSimpleStruct struct { + Value string `mapstructure:"value"` + skipped string + err error +} + +type TestEmptyStruct struct { + Value string `mapstructure:"-"` +} + +type TestID string + +func (tID TestID) MarshalText() (text []byte, err error) { + out := string(tID) + if out == "error" { + return nil, errors.New("parsing error") + } + if !strings.HasSuffix(out, "_") { + out += "_" + } + return []byte(out), nil +} + +func TestEncode(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: mapstructure.ComposeDecodeHookFunc( + TextMarshalerHookFunc(), + UnsupportedKindHookFunc(), + ), + }) + testCases := map[string]struct { + input any + want any + }{ + "WithString": { + input: "test", + want: "test", + }, + "WithTextMarshaler": { + input: TestID("type"), + want: "type_", + }, + "WithSlice": { + input: []TestID{ + TestID("nop"), + TestID("type_"), + }, + want: []any{"nop_", "type_"}, + }, + "WithSimpleStruct": { + input: TestSimpleStruct{Value: "test", skipped: "skipped"}, + want: map[string]any{ + "value": "test", + }, + }, + "WithComplexStruct": { + input: &TestComplexStruct{ + Skipped: TestEmptyStruct{ + Value: "omitted", + }, + Nested: TestSimpleStruct{ + Value: "nested", + }, + Slice: []TestSimpleStruct{ + {Value: "slice"}, + }, + Array: [2]string{"one", "two"}, + Map: 
map[string]TestSimpleStruct{ + "Key": {Value: "map"}, + }, + Pointer: &TestSimpleStruct{ + Value: "pointer", + }, + Remain: map[string]any{ + "remain1": 23, + "remain2": "value", + }, + Interface: TestID("value"), + Function: func() string { + return "ignore" + }, + Channel: make(chan string), + }, + want: map[string]any{ + "value": "nested", + "slice": []any{map[string]any{"value": "slice"}}, + "array": []any{"one", "two"}, + "map": map[string]any{ + "Key": map[string]any{"value": "map"}, + }, + "ptr": map[string]any{"value": "pointer"}, + "interface": "value_", + "remain1": 23, + "remain2": "value", + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + got, err := enc.Encode(testCase.input) + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } + // without the TextMarshalerHookFunc + enc.config.EncodeHook = nil + testCase := TestID("test") + got, err := enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, testCase, got) +} + +func TestEncodeNil(t *testing.T) { + enc := New(&EncoderConfig{}) + got, err := enc.Encode(nil) + assert.NoError(t, err) + assert.Nil(t, got) + + testCase := struct { + NilMap map[string]string + NilSlice []string + }{ + NilMap: nil, + NilSlice: nil, + } + got, err = enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "nilmap": nil, + "nilslice": nil, + }, got) +} + +func TestGetTagInfo(t *testing.T) { + testCases := map[string]struct { + field reflect.StructField + wantName string + wantOmit bool + wantSquash bool + }{ + "WithoutTags": { + field: reflect.StructField{ + Name: "Test", + }, + wantName: "test", + }, + "WithoutMapStructureTag": { + field: reflect.StructField{ + Tag: `yaml:"hello,inline"`, + Name: "YAML", + }, + wantName: "yaml", + }, + "WithRename": { + field: reflect.StructField{ + Tag: `mapstructure:"hello"`, + Name: "Test", + }, + wantName: "hello", + }, + "WithOmitEmpty": { + field: reflect.StructField{ + Tag: 
`mapstructure:"hello,omitempty"`, + Name: "Test", + }, + wantName: "hello", + wantOmit: true, + }, + "WithSquash": { + field: reflect.StructField{ + Tag: `mapstructure:",squash"`, + Name: "Test", + }, + wantSquash: true, + }, + "WithRemain": { + field: reflect.StructField{ + Tag: `mapstructure:",remain"`, + Name: "Test", + }, + wantSquash: true, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + got := getTagInfo(testCase.field) + assert.Equal(t, testCase.wantName, got.name) + assert.Equal(t, testCase.wantOmit, got.omitEmpty) + assert.Equal(t, testCase.wantSquash, got.squash) + }) + } +} + +func TestEncodeValueError(t *testing.T) { + enc := New(nil) + testValue := reflect.ValueOf("") + testCases := []struct { + encodeFn func(value reflect.Value) (any, error) + wantErr error + }{ + {encodeFn: enc.encodeMap, wantErr: &reflect.ValueError{Method: "encodeMap", Kind: reflect.String}}, + {encodeFn: enc.encodeStruct, wantErr: &reflect.ValueError{Method: "encodeStruct", Kind: reflect.String}}, + {encodeFn: enc.encodeSlice, wantErr: &reflect.ValueError{Method: "encodeSlice", Kind: reflect.String}}, + } + for _, testCase := range testCases { + got, err := testCase.encodeFn(testValue) + assert.Error(t, err) + assert.Equal(t, testCase.wantErr, err) + assert.Nil(t, got) + } +} + +func TestEncodeNonStringEncodedKey(t *testing.T) { + enc := New(nil) + testCase := []struct { + Test map[string]any + }{ + { + Test: map[string]any{ + "test": map[TestEmptyStruct]TestSimpleStruct{ + {Value: "key"}: {Value: "value"}, + }, + }, + }, + } + got, err := enc.Encode(testCase) + assert.Error(t, err) + assert.True(t, errors.Is(err, errNonStringEncodedKey)) + assert.Nil(t, got) +} + +func TestDuplicateKey(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: TextMarshalerHookFunc(), + }) + testCase := map[TestID]string{ + "test": "value", + "test_": "other value", + } + got, err := enc.Encode(testCase) + assert.Error(t, err) + assert.Nil(t, got) +} + 
+func TestTextMarshalerError(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: TextMarshalerHookFunc(), + }) + testCase := map[TestID]string{ + "error": "value", + } + got, err := enc.Encode(testCase) + assert.Error(t, err) + assert.Nil(t, got) +} + +func TestEncodeStruct(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: testHookFunc(), + }) + testCase := TestSimpleStruct{ + Value: "original", + skipped: "final", + } + got, err := enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, "final", got) +} + +func TestEncodeStructError(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: testHookFunc(), + }) + wantErr := errors.New("test") + testCase := map[TestSimpleStruct]string{ + {err: wantErr}: "value", + } + got, err := enc.Encode(testCase) + assert.Error(t, err) + assert.True(t, errors.Is(err, wantErr)) + assert.Nil(t, got) +} + +func testHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() != reflect.Struct { + return from.Interface(), nil + } + + got, ok := from.Interface().(TestSimpleStruct) + if !ok { + return from.Interface(), nil + } + if got.err != nil { + return nil, got.err + } + return got.skipped, nil + } +} + +func TestNilHook(t *testing.T) { + cfg := &EncoderConfig{ + EncodeHook: mapstructure.ComposeDecodeHookFunc( + NilHookFunc[TestSimpleStruct](), + TextMarshalerHookFunc(), + ), + } + enc := New(cfg) + testCase := struct { + Skip TestSimpleStruct + }{ + Skip: TestSimpleStruct{Value: "skip"}, + } + got, err := enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "skip": nil, + }, got) + cfg.OmitNilFields = true + got, err = enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, map[string]any{}, got) +} + +func TestNilZeroValueHook(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: NilZeroValueHookFunc[TestSimpleStruct](), + }) + testCase := struct { + NonZero TestSimpleStruct + Zero 
TestSimpleStruct + }{ + NonZero: TestSimpleStruct{ + Value: "test", + }, + Zero: TestSimpleStruct{}, + } + got, err := enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "nonzero": map[string]any{ + "value": "test", + }, + "zero": nil, + }, got) +} + +func TestUnsupportedKindHook(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: UnsupportedKindHookFunc(), + }) + testCases := map[reflect.Kind]any{ + reflect.Func: func() string { + return "unsupported" + }, + reflect.Chan: make(chan string), + } + for kind, input := range testCases { + t.Run(kind.String(), func(t *testing.T) { + got, err := enc.Encode(input) + assert.Error(t, err) + assert.ErrorIs(t, err, errUnsupportedKind) + assert.Nil(t, got) + }) + } +} + +type TestFunction func() string + +func (tf TestFunction) MarshalText() (text []byte, err error) { + return []byte(tf()), nil +} + +func TestUnsupportedKindWithMarshaler(t *testing.T) { + enc := New(&EncoderConfig{ + EncodeHook: mapstructure.ComposeDecodeHookFunc( + TextMarshalerHookFunc(), + UnsupportedKindHookFunc(), + ), + }) + testCase := TestFunction(func() string { + return "marshal" + }) + got, err := enc.Encode(testCase) + assert.NoError(t, err) + assert.Equal(t, "marshal", got) +} diff --git a/internal/mapstructure/marshaler.go b/internal/mapstructure/marshaler.go new file mode 100644 index 0000000000..3d32e2c0a6 --- /dev/null +++ b/internal/mapstructure/marshaler.go @@ -0,0 +1,71 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package mapstructure + +import ( + "fmt" + "reflect" + + "github.com/mitchellh/mapstructure" + "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/confmap" +) + +func Marshal(rawVal any) (map[string]any, error) { + enc := New(encoderConfig(rawVal)) + data, err := enc.Encode(rawVal) + if err != nil { + return nil, err + } + out, ok := data.(map[string]any) + if !ok { + return nil, fmt.Errorf("invalid config encoding") + } + return out, nil +} + +// encoderConfig returns a default encoder.EncoderConfig that includes an EncodeHook that handles both +// TextMarshaller and confmap.Marshaler interfaces. +func encoderConfig(rawVal any) *EncoderConfig { + return &EncoderConfig{ + EncodeHook: mapstructure.ComposeDecodeHookFunc( + NilHookFunc[configopaque.String](), + NilZeroValueHookFunc[configtls.ServerConfig](), + TextMarshalerHookFunc(), + MarshalerHookFunc(rawVal), + UnsupportedKindHookFunc(), + ), + NilEmptyMap: true, + OmitNilFields: true, + } +} + +// MarshalerHookFunc returns a DecodeHookFuncValue that checks structs that aren't +// the original to see if they implement the Marshaler interface. +func MarshalerHookFunc(orig any) mapstructure.DecodeHookFuncValue { + origType := reflect.TypeOf(orig) + return func(from reflect.Value, _ reflect.Value) (any, error) { + if !from.IsValid() { + return nil, nil + } + if from.Kind() != reflect.Struct { + return from.Interface(), nil + } + + // ignore original to avoid infinite loop. 
+ if from.Type() == origType && reflect.DeepEqual(from.Interface(), orig) { + return from.Interface(), nil + } + marshaler, ok := from.Interface().(confmap.Marshaler) + if !ok { + return from.Interface(), nil + } + conf := confmap.New() + if err := marshaler.Marshal(conf); err != nil { + return nil, err + } + return conf.ToStringMap(), nil + } +} diff --git a/internal/mapstructure/marshaler_test.go b/internal/mapstructure/marshaler_test.go new file mode 100644 index 0000000000..4efbd7e161 --- /dev/null +++ b/internal/mapstructure/marshaler_test.go @@ -0,0 +1,99 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package mapstructure + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" +) + +type TestConfig struct { + Boolean *bool `mapstructure:"boolean"` + Struct *Struct `mapstructure:"struct"` + MapStruct map[string]*Struct `mapstructure:"map_struct"` +} + +func (t TestConfig) Marshal(conf *confmap.Conf) error { + if t.Boolean != nil && !*t.Boolean { + return errors.New("unable to marshal") + } + if err := conf.Marshal(t); err != nil { + return err + } + return conf.Merge(confmap.NewFromStringMap(map[string]any{ + "additional": "field", + })) +} + +type Struct struct { + Name string +} + +type TestIDConfig struct { + Boolean bool `mapstructure:"bool"` + Map map[TestID]string `mapstructure:"map"` +} + +func TestMarshal(t *testing.T) { + cfg := &TestIDConfig{ + Boolean: true, + Map: map[TestID]string{ + "string": "this is a string", + }, + } + got, err := Marshal(cfg) + assert.NoError(t, err) + conf := confmap.NewFromStringMap(got) + assert.Equal(t, true, conf.Get("bool")) + assert.Equal(t, map[string]any{"string_": "this is a string"}, conf.Get("map")) +} + +func TestMarshalDuplicateID(t *testing.T) { + cfg := &TestIDConfig{ + Boolean: true, + Map: map[TestID]string{ + "string": "this is a string", + "string_": "this is another string", + }, + 
} + _, err := Marshal(cfg) + assert.Error(t, err) +} + +func TestMarshalError(t *testing.T) { + _, err := Marshal(nil) + assert.Error(t, err) +} + +func TestMarshaler(t *testing.T) { + cfg := &TestConfig{ + Struct: &Struct{ + Name: "StructName", + }, + } + got, err := Marshal(cfg) + assert.NoError(t, err) + conf := confmap.NewFromStringMap(got) + assert.Equal(t, "field", conf.Get("additional")) + + type NestedMarshaler struct { + TestConfig *TestConfig + } + nmCfg := &NestedMarshaler{ + TestConfig: cfg, + } + got, err = Marshal(nmCfg) + assert.NoError(t, err) + conf = confmap.NewFromStringMap(got) + sub, err := conf.Sub("testconfig") + assert.NoError(t, err) + assert.True(t, sub.IsSet("additional")) + assert.Equal(t, "field", sub.Get("additional")) + varBool := false + nmCfg.TestConfig.Boolean = &varBool + assert.Error(t, conf.Marshal(nmCfg)) +} diff --git a/internal/util/testutil/testutil.go b/internal/util/testutil/testutil.go index 16868852c5..1ed0570f3f 100644 --- a/internal/util/testutil/testutil.go +++ b/internal/util/testutil/testutil.go @@ -29,3 +29,12 @@ func GetConf(t *testing.T, path string) *confmap.Conf { require.NoError(t, err) return conf } + +func GetConfWithOverrides(t *testing.T, path string, overrides map[string]any) *confmap.Conf { + t.Helper() + conf, err := confmaptest.LoadConf(path) + require.NoError(t, err) + err = conf.Merge(confmap.NewFromStringMap(overrides)) + require.NoError(t, err) + return conf +} diff --git a/internal/util/unit/prefix.go b/internal/util/unit/prefix.go index 834677078e..07b052ccde 100644 --- a/internal/util/unit/prefix.go +++ b/internal/util/unit/prefix.go @@ -5,6 +5,11 @@ package unit import "fmt" +type Prefix interface { + fmt.Stringer + Scale() float64 +} + // MetricPrefix is a base 10 prefix used by the metric system. type MetricPrefix string @@ -20,8 +25,10 @@ const ( MetricPrefixTera = "T" ) -// Value returns the scale from the base unit or -1 if invalid. 
-func (m MetricPrefix) Value() float64 { +var MetricPrefixes = []MetricPrefix{MetricPrefixKilo, MetricPrefixMega, MetricPrefixGiga, MetricPrefixTera} + +// Scale returns the scale from the base unit or -1 if invalid. +func (m MetricPrefix) Scale() float64 { switch m { case MetricPrefixKilo: return kilo @@ -35,6 +42,10 @@ func (m MetricPrefix) Value() float64 { return -1 } +func (m MetricPrefix) String() string { + return string(m) +} + // BinaryPrefix is a base 2 prefix for data storage. type BinaryPrefix string @@ -51,8 +62,10 @@ const ( BinaryPrefixTebi = "Ti" ) -// Value returns the scale from the base unit or -1 if invalid. -func (b BinaryPrefix) Value() float64 { +var BinaryPrefixes = []BinaryPrefix{BinaryPrefixKibi, BinaryPrefixMebi, BinaryPrefixGibi, BinaryPrefixTebi} + +// Scale returns the scale from the base unit or -1 if invalid. +func (b BinaryPrefix) Scale() float64 { switch b { case BinaryPrefixKibi: return kibi @@ -66,6 +79,10 @@ func (b BinaryPrefix) Value() float64 { return -1 } +func (b BinaryPrefix) String() string { + return string(b) +} + var binaryToMetricMapping = map[BinaryPrefix]MetricPrefix{ BinaryPrefixKibi: MetricPrefixKilo, BinaryPrefixMebi: MetricPrefixMega, @@ -79,6 +96,6 @@ func ConvertToMetric(binaryPrefix BinaryPrefix) (MetricPrefix, float64, error) { if !ok { return "", -1, fmt.Errorf("no valid conversion for %v", binaryPrefix) } - scale := binaryPrefix.Value() / metricPrefix.Value() + scale := binaryPrefix.Scale() / metricPrefix.Scale() return metricPrefix, scale, nil } diff --git a/internal/util/unit/prefix_test.go b/internal/util/unit/prefix_test.go index 786d62dd3e..68c126e4ec 100644 --- a/internal/util/unit/prefix_test.go +++ b/internal/util/unit/prefix_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMetricPrefix(t *testing.T) { @@ -18,12 +17,16 @@ func TestMetricPrefix(t *testing.T) { }{ {"Ki", -1}, {"k", 1e3}, + {"M", 1e6}, {"G", 1e9}, + 
{"T", 1e12}, } for _, testCase := range testCases { got := MetricPrefix(testCase.prefix) - assert.Equal(t, testCase.value, got.Value()) + assert.Equal(t, testCase.value, got.Scale()) + assert.Equal(t, testCase.prefix, got.String()) } + assert.Len(t, MetricPrefixes, 4) } func TestBinaryPrefix(t *testing.T) { @@ -32,13 +35,17 @@ func TestBinaryPrefix(t *testing.T) { value float64 }{ {"k", -1}, - {"Ki", 1024}, - {"Gi", 1073741824}, + {"Ki", math.Pow(2, 10)}, + {"Mi", math.Pow(2, 20)}, + {"Gi", math.Pow(2, 30)}, + {"Ti", math.Pow(2, 40)}, } for _, testCase := range testCases { got := BinaryPrefix(testCase.prefix) - assert.Equal(t, testCase.value, got.Value()) + assert.Equal(t, testCase.value, got.Scale()) + assert.Equal(t, testCase.prefix, got.String()) } + assert.Len(t, BinaryPrefixes, 4) } func TestConvertBinaryToMetric(t *testing.T) { @@ -50,15 +57,14 @@ func TestConvertBinaryToMetric(t *testing.T) { prefix BinaryPrefix metricPrefix MetricPrefix scale float64 - epsilon float64 }{ - {BinaryPrefixKibi, MetricPrefixKilo, 1.024, 0}, - {BinaryPrefixGibi, MetricPrefixGiga, 1.073, 0.001}, + {BinaryPrefixKibi, MetricPrefixKilo, 1.024}, + {BinaryPrefixGibi, MetricPrefixGiga, 1.073741824}, } for _, testCase := range testCases { got, scale, err = ConvertToMetric(testCase.prefix) - require.NoError(t, err) + assert.NoError(t, err) assert.Equal(t, testCase.metricPrefix, got) - assert.GreaterOrEqual(t, testCase.epsilon, math.Abs(testCase.scale-scale)) + assert.Equal(t, testCase.scale, scale) } } diff --git a/packaging/dependencies/amazon-cloudwatch-agent-ctl b/packaging/dependencies/amazon-cloudwatch-agent-ctl index 83f2f0c46f..bd0d47000e 100755 --- a/packaging/dependencies/amazon-cloudwatch-agent-ctl +++ b/packaging/dependencies/amazon-cloudwatch-agent-ctl @@ -154,7 +154,7 @@ agent_cond_restart() { agent_name="${1:-}" restart_file="${2:-}" if [ -f "${restart_file}" ]; then - agent_start "${agent_name}" + agent_start "${agent_name}" "${mode}" rm -f "${restart_file}" fi } diff 
--git a/plugins/inputs/logfile/fileconfig_test.go b/plugins/inputs/logfile/fileconfig_test.go index d876ac1d4f..7baa7cbbea 100644 --- a/plugins/inputs/logfile/fileconfig_test.go +++ b/plugins/inputs/logfile/fileconfig_test.go @@ -233,6 +233,17 @@ func TestTimestampParserWithFracSeconds(t *testing.T) { fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) } +func TestNonAllowlistedTimezone(t *testing.T) { + fileConfig := &FileConfig{ + Timezone: "EST", + } + + err := fileConfig.init() + assert.NoError(t, err) + + assert.Equal(t, time.Local, fileConfig.TimezoneLoc, "The timezone location should be in local timezone.") +} + func TestMultiLineStartPattern(t *testing.T) { multiLineStartPattern := "---" fileConfig := &FileConfig{ diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index 63a1329179..f253262812 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -107,8 +107,9 @@ func (t *LogFile) Start(acc telegraf.Accumulator) error { return fmt.Errorf("failed to create state file directory %s: %v", t.FileStateFolder, err) } - // Clean state file regularly + // Clean state file on init and regularly go func() { + t.cleanupStateFolder() ticker := time.NewTicker(1 * time.Hour) defer ticker.Stop() for { diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md new file mode 100644 index 0000000000..58dabcf790 --- /dev/null +++ b/plugins/inputs/nvidia_smi/README.md @@ -0,0 +1,155 @@ +# Nvidia System Management Interface (SMI) Input Plugin + +This plugin uses a query on the +[`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) +binary to pull GPU stats including memory and GPU usage, temp and other. + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. 
These settings are used to +modify metrics, tags, and field or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Configuration + +```toml @sample.conf +# Pulls statistics from nvidia GPUs attached to the host +[[inputs.nvidia_smi]] + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned + # bin_path = "/usr/bin/nvidia-smi" + + ## Optional: specifies plugin behavior regarding missing nvidia-smi binary + ## Available choices: + ## - error: telegraf will return an error on startup + ## - ignore: telegraf will ignore this plugin + # startup_error_behavior = "error" + + ## Optional: timeout for GPU polling + # timeout = "5s" +``` + +### Linux + +On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` + +### Windows + +On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA +Corporation\NVSMI\nvidia-smi.exe` On Windows 10, you may also find this located +here `C:\Windows\System32\nvidia-smi.exe` + +You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program +Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` + +## Metrics + +- measurement: `nvidia_smi` + - tags + - `name` (type of GPU e.g. `GeForce GTX 1070 Ti`) + - `compute_mode` (The compute mode of the GPU e.g. `Default`) + - `index` (The port index where the GPU is connected to the motherboard e.g. `1`) + - `pstate` (Overclocking state for the GPU e.g. `P0`) + - `uuid` (A unique identifier for the GPU e.g. 
`GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`) + - fields + - `fan_speed` (integer, percentage) + - `fbc_stats_session_count` (integer) + - `fbc_stats_average_fps` (integer) + - `fbc_stats_average_latency` (integer) + - `memory_free` (integer, MiB) + - `memory_used` (integer, MiB) + - `memory_total` (integer, MiB) + - `memory_reserved` (integer, MiB) + - `retired_pages_multiple_single_bit` (integer) + - `retired_pages_double_bit` (integer) + - `retired_pages_blacklist` (string) + - `retired_pages_pending` (string) + - `remapped_rows_correctable` (int) + - `remapped_rows_uncorrectable` (int) + - `remapped_rows_pending` (string) + - `remapped_rows_pending` (string) + - `remapped_rows_failure` (string) + - `power_draw` (float, W) + - `temperature_gpu` (integer, degrees C) + - `utilization_gpu` (integer, percentage) + - `utilization_memory` (integer, percentage) + - `utilization_encoder` (integer, percentage) + - `utilization_decoder` (integer, percentage) + - `pcie_link_gen_current` (integer) + - `pcie_link_width_current` (integer) + - `encoder_stats_session_count` (integer) + - `encoder_stats_average_fps` (integer) + - `encoder_stats_average_latency` (integer) + - `clocks_current_graphics` (integer, MHz) + - `clocks_current_sm` (integer, MHz) + - `clocks_current_memory` (integer, MHz) + - `clocks_current_video` (integer, MHz) + - `driver_version` (string) + - `cuda_version` (string) + +## Sample Query + +The below query could be used to alert on the average temperature of the your +GPUs over the last minute + +```sql +SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host" +``` + +## Troubleshooting + +Check the full output by running `nvidia-smi` binary manually. + +Linux: + +```sh +sudo -u telegraf -- /usr/bin/nvidia-smi -q -x +``` + +Windows: + +```sh +"C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x +``` + +Please include the output of this command if opening an GitHub issue. 
+ +## Example Output + +```text +nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 +nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 +nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 +``` + +## Limitations + +Note that there seems to be an issue with getting current memory clock values +when the memory is overclocked. This may or may not apply to everyone but it's +confirmed to be an issue on an EVGA 2080 Ti. + +**NOTE:** For use with docker either generate your own custom docker image based +on nvidia/cuda which also installs a telegraf package or use [volume mount +binding](https://docs.docker.com/storage/bind-mounts/) to inject the required +binary into the docker container. In particular you will need to pass through +the /dev/nvidia* devices, the nvidia-smi binary and the nvidia libraries. 
+An minimal docker-compose example of how to do this is: + +```yaml + telegraf: + image: telegraf + runtime: nvidia + devices: + - /dev/nvidiactl:/dev/nvidiactl + - /dev/nvidia0:/dev/nvidia0 + volumes: + - ./telegraf/etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro + - /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro + - /usr/lib/x86_64-linux-gnu/nvidia:/usr/lib/x86_64-linux-gnu/nvidia:ro + environment: + - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/nvidia/current/libnvidia-ml.so +``` diff --git a/plugins/inputs/nvidia_smi/common/setters.go b/plugins/inputs/nvidia_smi/common/setters.go new file mode 100644 index 0000000000..9bd27ba585 --- /dev/null +++ b/plugins/inputs/nvidia_smi/common/setters.go @@ -0,0 +1,48 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package common + +import ( + "strconv" + "strings" +) + +func SetTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func SetIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return + } + + val := vals[0] + if k == "pcie_link_width_current" { + val = strings.TrimSuffix(vals[0], "x") + } + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" && val != "N/A" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + case "str": + if val != "" && val != "N/A" { + m[k] = val + } + } +} diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go new file mode 100644 index 0000000000..876cfb2e52 --- /dev/null +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -0,0 +1,143 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +//go:generate ../../../tools/readme_config_includer/generator +package nvidia_smi + +import ( + "bytes" + _ "embed" + "encoding/xml" + "errors" + "fmt" + "io" + "os" + "os/exec" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/aws/amazon-cloudwatch-agent/internal" + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/nvidia_smi/schema_v11" + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/nvidia_smi/schema_v12" +) + +//go:embed sample.conf +var sampleConfig string + +// NvidiaSMI holds the methods for this plugin +type NvidiaSMI struct { + BinPath string `toml:"bin_path"` + Timeout config.Duration `toml:"timeout"` + StartupErrorBehavior string `toml:"startup_error_behavior"` + Log telegraf.Logger `toml:"-"` + + ignorePlugin bool + once sync.Once +} + +// Description returns the description of the NvidiaSMI plugin +func (smi *NvidiaSMI) Description() string { + return "Pulls statistics from nvidia GPUs attached to the host" +} + +func (*NvidiaSMI) SampleConfig() string { + return sampleConfig +} + +func (smi *NvidiaSMI) Init() error { + if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { + binPath, err := exec.LookPath("nvidia-smi") + if err != nil { + switch smi.StartupErrorBehavior { + case "ignore": + smi.ignorePlugin = true + smi.Log.Warnf("nvidia-smi not found on the system, ignoring: %s", err) + return nil + case "", "error": + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + default: + return fmt.Errorf("unknown startup behavior setting: %s", smi.StartupErrorBehavior) + } + } + smi.BinPath = binPath + } + + return nil +} + +// Gather implements the telegraf interface +func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { + if smi.ignorePlugin { + return nil + } + + // Construct and execute 
metrics query + data, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), time.Duration(smi.Timeout)) + if err != nil { + return fmt.Errorf("calling %q failed: %w", smi.BinPath, err) + } + + // Parse the output + return smi.parse(acc, data) +} + +func (smi *NvidiaSMI) parse(acc telegraf.Accumulator, data []byte) error { + schema := "v11" + + buf := bytes.NewBuffer(data) + decoder := xml.NewDecoder(buf) + for { + token, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("reading token failed: %w", err) + } + d, ok := token.(xml.Directive) + if !ok { + continue + } + directive := string(d) + if !strings.HasPrefix(directive, "DOCTYPE") { + continue + } + parts := strings.Split(directive, " ") + s := strings.Trim(parts[len(parts)-1], "\" ") + if strings.HasPrefix(s, "nvsmi_device_") && strings.HasSuffix(s, ".dtd") { + schema = strings.TrimSuffix(strings.TrimPrefix(s, "nvsmi_device_"), ".dtd") + } else { + smi.Log.Debugf("Cannot find schema version in %q", directive) + } + break + } + smi.Log.Debugf("Using schema version in %s", schema) + + switch schema { + case "v10", "v11": + return schema_v11.Parse(acc, data) + case "v12": + return schema_v12.Parse(acc, data) + } + + smi.once.Do(func() { + smi.Log.Warnf(`Unknown schema version %q, using latest know schema for parsing. + Please report this as an issue to https://github.com/influxdata/telegraf together + with a sample output of 'nvidia_smi -q -x'!`, schema) + }) + return schema_v12.Parse(acc, data) +} + +func init() { + inputs.Add("nvidia_smi", func() telegraf.Input { + return &NvidiaSMI{ + BinPath: "/usr/bin/nvidia-smi", + Timeout: config.Duration(5 * time.Second), + } + }) +} diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go new file mode 100644 index 0000000000..3d8202fd67 --- /dev/null +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -0,0 +1,542 @@ +// Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package nvidia_smi + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestErrorBehaviorError(t *testing.T) { + // make sure we can't find nvidia-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &NvidiaSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "error", + } + require.Error(t, plugin.Init()) +} + +func TestErrorBehaviorDefault(t *testing.T) { + // make sure we can't find nvidia-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &NvidiaSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + } + require.Error(t, plugin.Init()) +} + +func TestErorBehaviorIgnore(t *testing.T) { + // make sure we can't find nvidia-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &NvidiaSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "ignore", + } + require.NoError(t, plugin.Init()) + acc := testutil.Accumulator{} + require.NoError(t, plugin.Gather(&acc)) +} + +func TestErrorBehaviorInvalidOption(t *testing.T) { + // make sure we can't find nvidia-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &NvidiaSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "giveup", + } + require.Error(t, plugin.Init()) +} + +func TestGatherValidXML(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "GeForce GTX 1070 Ti", + filename: "gtx-1070-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "name": "GeForce GTX 1070 Ti", + "compute_mode": "Default", + "index": "0", + "pstate": "P8", + "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665", + }, + map[string]interface{}{ + 
"clocks_current_graphics": 135, + "clocks_current_memory": 405, + "clocks_current_sm": 135, + "clocks_current_video": 405, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 100, + "memory_free": 4054, + "memory_total": 4096, + "memory_used": 42, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "temperature_gpu": 39, + "utilization_gpu": 0, + "utilization_memory": 0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "GeForce GTX 1660 Ti", + filename: "gtx-1660-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Graphics Device", + "pstate": "P8", + "uuid": "GPU-304a277d-3545-63b8-3a36-dfde3c992989", + }, + map[string]interface{}{ + "clocks_current_graphics": 300, + "clocks_current_memory": 405, + "clocks_current_sm": 300, + "clocks_current_video": 540, + "cuda_version": "10.1", + "display_active": "Disabled", + "display_mode": "Disabled", + "driver_version": "418.43", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "fan_speed": 0, + "memory_free": 5912, + "memory_total": 5912, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "power_draw": 8.93, + "temperature_gpu": 40, + "utilization_gpu": 0, + "utilization_memory": 1, + "utilization_encoder": 0, + "utilization_decoder": 0, + "vbios_version": "90.16.25.00.4C", + }, + time.Unix(0, 0)), + }, + }, + { + name: "Quadro P400", + filename: "quadro-p400.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Quadro P400", + "pstate": "P8", + "uuid": "GPU-8f750be4-dfbc-23b9-b33f-da729a536494", + }, + map[string]interface{}{ + "clocks_current_graphics": 
139, + "clocks_current_memory": 405, + "clocks_current_sm": 139, + "clocks_current_video": 544, + "cuda_version": "10.1", + "display_active": "Disabled", + "display_mode": "Disabled", + "driver_version": "418.43", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "fan_speed": 34, + "memory_free": 1998, + "memory_total": 1998, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "serial": "0424418054852", + "temperature_gpu": 33, + "utilization_gpu": 0, + "utilization_memory": 3, + "utilization_encoder": 0, + "utilization_decoder": 0, + "vbios_version": "86.07.3B.00.4A", + }, + time.Unix(0, 0)), + }, + }, + { + name: "Quadro P2000", + filename: "quadro-p2000-v12.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "arch": "Pascal", + "compute_mode": "Default", + "index": "0", + "name": "Quadro P2000", + "pstate": "P8", + "uuid": "GPU-396caaed-39ca-3199-2e68-717cdb786ec6", + }, + map[string]interface{}{ + + "clocks_current_graphics": 139, + "clocks_current_memory": 405, + "clocks_current_sm": 139, + "clocks_current_video": 544, + "cuda_version": "12.0", + "display_active": "Disabled", + "display_mode": "Disabled", + "driver_version": "525.125.06", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "fan_speed": 46, + "memory_free": 5051, + "memory_reserved": 66, + "memory_total": 5120, + "memory_used": 1, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 8, + "power_draw": float64(4.61), + "serial": "0322218049033", + "temperature_gpu": 34, + "utilization_gpu": 0, + "utilization_memory": 0, + "utilization_encoder": 0, + "utilization_decoder": 0, + "vbios_version": 
"86.06.3F.00.30", + }, + time.Unix(0, 0)), + }, + }, + { + name: "Tesla T4", + filename: "tesla-t4.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Tesla T4", + "pstate": "P0", + "uuid": "GPU-d37e67a5-91dd-3774-a5cb-99096249601a", + }, + map[string]interface{}{ + "clocks_current_graphics": 585, + "clocks_current_memory": 5000, + "clocks_current_sm": 585, + "clocks_current_video": 810, + "cuda_version": "11.7", + "current_ecc": "Enabled", + "display_active": "Disabled", + "display_mode": "Disabled", + "driver_version": "515.105.01", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "power_draw": 26.78, + "memory_free": 13939, + "memory_total": 15360, + "memory_used": 1032, + "memory_reserved": 388, + "retired_pages_multiple_single_bit": 0, + "retired_pages_double_bit": 0, + "retired_pages_blacklist": "No", + "retired_pages_pending": "No", + "pcie_link_gen_current": 3, + "pcie_link_width_current": 8, + "serial": "0000000000000", + "temperature_gpu": 40, + "utilization_gpu": 0, + "utilization_memory": 0, + "utilization_encoder": 0, + "utilization_decoder": 0, + "vbios_version": "90.04.84.00.06", + }, + time.Unix(0, 0)), + }, + }, + { + name: "A10G", + filename: "a10g.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "NVIDIA A10G", + "pstate": "P8", + "uuid": "GPU-9a9a6c50-2a47-2f51-a902-b82c3b127e94", + }, + map[string]interface{}{ + "clocks_current_graphics": 210, + "clocks_current_memory": 405, + "clocks_current_sm": 210, + "clocks_current_video": 555, + "cuda_version": "11.7", + "current_ecc": "Enabled", + "display_active": "Disabled", + "display_mode": "Disabled", + "driver_version": "515.105.01", + 
"encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "fan_speed": 0, + "power_draw": 25.58, + "memory_free": 22569, + "memory_total": 23028, + "memory_used": 22, + "memory_reserved": 435, + "remapped_rows_correctable": 0, + "remapped_rows_uncorrectable": 0, + "remapped_rows_pending": "No", + "remapped_rows_failure": "No", + "pcie_link_gen_current": 1, + "pcie_link_width_current": 8, + "serial": "0000000000000", + "temperature_gpu": 17, + "utilization_gpu": 0, + "utilization_memory": 0, + "utilization_encoder": 0, + "utilization_decoder": 0, + "vbios_version": "94.02.75.00.01", + }, + time.Unix(0, 0)), + }, + }, + { + name: "RTC 3080 schema v12", + filename: "rtx-3080-v12.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "NVIDIA GeForce RTX 3080", + "arch": "Ampere", + "pstate": "P8", + "uuid": "GPU-19d6d965-2acc-f646-00f8-4c76979aabb4", + }, + map[string]interface{}{ + "clocks_current_graphics": 210, + "clocks_current_memory": 405, + "clocks_current_sm": 210, + "clocks_current_video": 555, + "cuda_version": "12.2", + "display_active": "Enabled", + "display_mode": "Enabled", + "driver_version": "536.40", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "fan_speed": 0, + "power_draw": 22.78, + "memory_free": 8938, + "memory_total": 10240, + "memory_used": 1128, + "memory_reserved": 173, + "pcie_link_gen_current": 4, + "pcie_link_width_current": 16, + "temperature_gpu": 31, + "utilization_gpu": 0, + "utilization_jpeg": 0, + "utilization_memory": 37, + "utilization_encoder": 0, + "utilization_decoder": 0, + "utilization_ofa": 0, + "vbios_version": "94.02.71.40.72", + 
}, + time.Unix(1689872450, 0)), + }, + }, + { + name: "A100-SXM4 schema v12", + filename: "a100-sxm4-v12.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "NVIDIA A100-SXM4-80GB", + "arch": "Ampere", + "pstate": "P0", + "uuid": "GPU-513536b6-7d19-9063-b049-1e69664bb298", + }, + map[string]interface{}{ + "clocks_current_graphics": 1275, + "clocks_current_memory": 1593, + "clocks_current_sm": 1275, + "clocks_current_video": 1275, + "cuda_version": "12.2", + "current_ecc": "Enabled", + "display_active": "Disabled", + "display_mode": "Enabled", + "driver_version": "535.54.03", + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fbc_stats_average_fps": 0, + "fbc_stats_average_latency": 0, + "fbc_stats_session_count": 0, + "power_draw": 67.03, + "memory_free": 80999, + "memory_total": 81920, + "memory_used": 50, + "memory_reserved": 869, + "pcie_link_gen_current": 4, + "pcie_link_width_current": 16, + "serial": "1650522003820", + "temperature_gpu": 27, + "vbios_version": "92.00.36.00.02", + }, + time.Unix(1689872450, 0)), + testutil.MustMetric( + "nvidia_smi_mig", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "NVIDIA A100-SXM4-80GB", + "arch": "Ampere", + "pstate": "P0", + "uuid": "GPU-513536b6-7d19-9063-b049-1e69664bb298", + "compute_index": "0", + "gpu_index": "3", + }, + map[string]interface{}{ + "memory_bar1_free": 32767, + "memory_bar1_total": 32767, + "memory_bar1_used": 0, + "memory_fb_free": 19955, + "memory_fb_reserved": 0, + "memory_fb_total": 19968, + "memory_fb_used": 12, + "sram_uncorrectable": 0, + }, + time.Unix(1689872450, 0)), + testutil.MustMetric( + "nvidia_smi_mig", + map[string]string{ + "compute_mode": "Default", + "index": "1", + "name": "NVIDIA A100-SXM4-80GB", + "arch": "Ampere", + "pstate": "P0", + "uuid": "GPU-513536b6-7d19-9063-b049-1e69664bb298", + 
"compute_index": "0", + "gpu_index": "4", + }, + map[string]interface{}{ + "memory_bar1_free": 32767, + "memory_bar1_total": 32767, + "memory_bar1_used": 0, + "memory_fb_free": 19955, + "memory_fb_reserved": 0, + "memory_fb_total": 19968, + "memory_fb_used": 12, + "sram_uncorrectable": 0, + }, + time.Unix(1689872450, 0)), + testutil.MustMetric( + "nvidia_smi_mig", + map[string]string{ + "compute_mode": "Default", + "index": "2", + "name": "NVIDIA A100-SXM4-80GB", + "arch": "Ampere", + "pstate": "P0", + "uuid": "GPU-513536b6-7d19-9063-b049-1e69664bb298", + "compute_index": "0", + "gpu_index": "5", + }, + map[string]interface{}{ + "memory_bar1_free": 32767, + "memory_bar1_total": 32767, + "memory_bar1_used": 0, + "memory_fb_free": 19955, + "memory_fb_reserved": 0, + "memory_fb_total": 19968, + "memory_fb_used": 12, + "sram_uncorrectable": 0, + }, + time.Unix(1689872450, 0)), + testutil.MustMetric( + "nvidia_smi_mig", + map[string]string{ + "compute_mode": "Default", + "index": "3", + "name": "NVIDIA A100-SXM4-80GB", + "arch": "Ampere", + "pstate": "P0", + "uuid": "GPU-513536b6-7d19-9063-b049-1e69664bb298", + "compute_index": "0", + "gpu_index": "6", + }, + map[string]interface{}{ + "memory_bar1_free": 32767, + "memory_bar1_total": 32767, + "memory_bar1_used": 0, + "memory_fb_free": 19955, + "memory_fb_reserved": 0, + "memory_fb_total": 19968, + "memory_fb_used": 12, + "sram_uncorrectable": 0, + }, + time.Unix(1689872450, 0)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + plugin := &NvidiaSMI{Log: &testutil.Logger{}} + + var acc testutil.Accumulator + require.NoError(t, plugin.parse(&acc, octets)) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/nvidia_smi/sample.conf b/plugins/inputs/nvidia_smi/sample.conf new file mode 100644 index 
0000000000..dee34936d3 --- /dev/null +++ b/plugins/inputs/nvidia_smi/sample.conf @@ -0,0 +1,15 @@ +# Pulls statistics from nvidia GPUs attached to the host +[[inputs.nvidia_smi]] + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned + # bin_path = "/usr/bin/nvidia-smi" + + ## Optional: specifies plugin behavior regarding missing nvidia-smi binary + ## Available choices: + ## - error: telegraf will return an error on startup + ## - ignore: telegraf will ignore this plugin + # startup_error_behavior = "error" + + ## Optional: timeout for GPU polling + # timeout = "5s" diff --git a/plugins/inputs/nvidia_smi/schema_v11/parser.go b/plugins/inputs/nvidia_smi/schema_v11/parser.go new file mode 100644 index 0000000000..af383f990b --- /dev/null +++ b/plugins/inputs/nvidia_smi/schema_v11/parser.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package schema_v11 + +import ( + "encoding/xml" + "strconv" + + "github.com/influxdata/telegraf" + + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/nvidia_smi/common" +) + +func Parse(acc telegraf.Accumulator, buf []byte) error { + var s smi + if err := xml.Unmarshal(buf, &s); err != nil { + return err + } + + for i := range s.GPU { + gpu := &s.GPU[i] + + tags := map[string]string{ + "index": strconv.Itoa(i), + } + fields := map[string]interface{}{} + + common.SetTagIfUsed(tags, "pstate", gpu.PState) + common.SetTagIfUsed(tags, "name", gpu.ProdName) + common.SetTagIfUsed(tags, "uuid", gpu.UUID) + common.SetTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + + common.SetIfUsed("str", fields, "driver_version", s.DriverVersion) + common.SetIfUsed("str", fields, "cuda_version", s.CUDAVersion) + common.SetIfUsed("str", fields, "serial", gpu.Serial) + common.SetIfUsed("str", fields, "vbios_version", gpu.VbiosVersion) + common.SetIfUsed("str", fields, "display_active", gpu.DisplayActive) + common.SetIfUsed("str", fields, "display_mode", gpu.DisplayMode) + common.SetIfUsed("str", fields, "current_ecc", gpu.EccMode.CurrentEcc) + common.SetIfUsed("int", fields, "fan_speed", gpu.FanSpeed) + common.SetIfUsed("int", fields, "memory_total", gpu.Memory.Total) + common.SetIfUsed("int", fields, "memory_used", gpu.Memory.Used) + common.SetIfUsed("int", fields, "memory_free", gpu.Memory.Free) + common.SetIfUsed("int", fields, "memory_reserved", gpu.Memory.Reserved) + common.SetIfUsed("int", fields, "retired_pages_multiple_single_bit", gpu.RetiredPages.MultipleSingleBit.Count) + common.SetIfUsed("int", fields, "retired_pages_double_bit", gpu.RetiredPages.DoubleBit.Count) + common.SetIfUsed("str", fields, "retired_pages_blacklist", gpu.RetiredPages.PendingBlacklist) + common.SetIfUsed("str", fields, "retired_pages_pending", gpu.RetiredPages.PendingRetirement) + common.SetIfUsed("int", fields, "remapped_rows_correctable", 
gpu.RemappedRows.Correctable) + common.SetIfUsed("int", fields, "remapped_rows_uncorrectable", gpu.RemappedRows.Uncorrectable) + common.SetIfUsed("str", fields, "remapped_rows_pending", gpu.RemappedRows.Pending) + common.SetIfUsed("str", fields, "remapped_rows_failure", gpu.RemappedRows.Failure) + common.SetIfUsed("int", fields, "temperature_gpu", gpu.Temp.GPUTemp) + common.SetIfUsed("int", fields, "utilization_gpu", gpu.Utilization.GPU) + common.SetIfUsed("int", fields, "utilization_memory", gpu.Utilization.Memory) + common.SetIfUsed("int", fields, "utilization_encoder", gpu.Utilization.Encoder) + common.SetIfUsed("int", fields, "utilization_decoder", gpu.Utilization.Decoder) + common.SetIfUsed("int", fields, "pcie_link_gen_current", gpu.PCI.LinkInfo.PCIEGen.CurrentLinkGen) + common.SetIfUsed("int", fields, "pcie_link_width_current", gpu.PCI.LinkInfo.LinkWidth.CurrentLinkWidth) + common.SetIfUsed("int", fields, "encoder_stats_session_count", gpu.Encoder.SessionCount) + common.SetIfUsed("int", fields, "encoder_stats_average_fps", gpu.Encoder.AverageFPS) + common.SetIfUsed("int", fields, "encoder_stats_average_latency", gpu.Encoder.AverageLatency) + common.SetIfUsed("int", fields, "fbc_stats_session_count", gpu.FBC.SessionCount) + common.SetIfUsed("int", fields, "fbc_stats_average_fps", gpu.FBC.AverageFPS) + common.SetIfUsed("int", fields, "fbc_stats_average_latency", gpu.FBC.AverageLatency) + common.SetIfUsed("int", fields, "clocks_current_graphics", gpu.Clocks.Graphics) + common.SetIfUsed("int", fields, "clocks_current_sm", gpu.Clocks.SM) + common.SetIfUsed("int", fields, "clocks_current_memory", gpu.Clocks.Memory) + common.SetIfUsed("int", fields, "clocks_current_video", gpu.Clocks.Video) + + common.SetIfUsed("float", fields, "power_draw", gpu.Power.PowerDraw) + acc.AddFields("nvidia_smi", fields, tags) + } + + return nil +} diff --git a/plugins/inputs/nvidia_smi/schema_v11/types.go b/plugins/inputs/nvidia_smi/schema_v11/types.go new file mode 100644 index 
0000000000..8b3e14cfed --- /dev/null +++ b/plugins/inputs/nvidia_smi/schema_v11/types.go @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package schema_v11 + +// SMI defines the structure for the output of _nvidia-smi -q -x_. +type smi struct { + GPU []GPU `xml:"gpu"` + DriverVersion string `xml:"driver_version"` + CUDAVersion string `xml:"cuda_version"` +} + +// GPU defines the structure of the GPU portion of the smi output. +type GPU struct { + Clocks ClockStats `xml:"clocks"` + ComputeMode string `xml:"compute_mode"` + DisplayActive string `xml:"display_active"` + DisplayMode string `xml:"display_mode"` + EccMode ECCMode `xml:"ecc_mode"` + Encoder EncoderStats `xml:"encoder_stats"` + FanSpeed string `xml:"fan_speed"` // int + FBC FBCStats `xml:"fbc_stats"` + Memory MemoryStats `xml:"fb_memory_usage"` + PCI PCI `xml:"pci"` + Power PowerReadings `xml:"power_readings"` + ProdName string `xml:"product_name"` + PState string `xml:"performance_state"` + RemappedRows MemoryRemappedRows `xml:"remapped_rows"` + RetiredPages MemoryRetiredPages `xml:"retired_pages"` + Serial string `xml:"serial"` + Temp TempStats `xml:"temperature"` + Utilization UtilizationStats `xml:"utilization"` + UUID string `xml:"uuid"` + VbiosVersion string `xml:"vbios_version"` +} + +// ECCMode defines the structure of the ecc portions in the smi output. +type ECCMode struct { + CurrentEcc string `xml:"current_ecc"` // Enabled, Disabled, N/A + PendingEcc string `xml:"pending_ecc"` // Enabled, Disabled, N/A +} + +// MemoryStats defines the structure of the memory portions in the smi output. +type MemoryStats struct { + Total string `xml:"total"` // int + Used string `xml:"used"` // int + Free string `xml:"free"` // int + Reserved string `xml:"reserved"` // int +} + +// MemoryRetiredPages defines the structure of the retired pages portions in the smi output. 
+type MemoryRetiredPages struct { + MultipleSingleBit struct { + Count string `xml:"retired_count"` // int + } `xml:"multiple_single_bit_retirement"` + DoubleBit struct { + Count string `xml:"retired_count"` // int + } `xml:"double_bit_retirement"` + PendingBlacklist string `xml:"pending_blacklist"` // Yes/No + PendingRetirement string `xml:"pending_retirement"` // Yes/No +} + +// MemoryRemappedRows defines the structure of the remapped rows portions in the smi output. +type MemoryRemappedRows struct { + Correctable string `xml:"remapped_row_corr"` // int + Uncorrectable string `xml:"remapped_row_unc"` // int + Pending string `xml:"remapped_row_pending"` // Yes/No + Failure string `xml:"remapped_row_failure"` // Yes/No +} + +// TempStats defines the structure of the temperature portion of the smi output. +type TempStats struct { + GPUTemp string `xml:"gpu_temp"` // int +} + +// UtilizationStats defines the structure of the utilization portion of the smi output. +type UtilizationStats struct { + GPU string `xml:"gpu_util"` // int + Memory string `xml:"memory_util"` // int + Encoder string `xml:"encoder_util"` // int + Decoder string `xml:"decoder_util"` // int +} + +// PowerReadings defines the structure of the power_readings portion of the smi output. +type PowerReadings struct { + PowerDraw string `xml:"power_draw"` // float +} + +// PCI defines the structure of the pci portion of the smi output. +type PCI struct { + LinkInfo struct { + PCIEGen struct { + CurrentLinkGen string `xml:"current_link_gen"` // int + } `xml:"pcie_gen"` + LinkWidth struct { + CurrentLinkWidth string `xml:"current_link_width"` // int + } `xml:"link_widths"` + } `xml:"pci_gpu_link_info"` +} + +// EncoderStats defines the structure of the encoder_stats portion of the smi output. 
+type EncoderStats struct { + SessionCount string `xml:"session_count"` // int + AverageFPS string `xml:"average_fps"` // int + AverageLatency string `xml:"average_latency"` // int +} + +// FBCStats defines the structure of the fbc_stats portion of the smi output. +type FBCStats struct { + SessionCount string `xml:"session_count"` // int + AverageFPS string `xml:"average_fps"` // int + AverageLatency string `xml:"average_latency"` // int +} + +// ClockStats defines the structure of the clocks portion of the smi output. +type ClockStats struct { + Graphics string `xml:"graphics_clock"` // int + SM string `xml:"sm_clock"` // int + Memory string `xml:"mem_clock"` // int + Video string `xml:"video_clock"` // int +} diff --git a/plugins/inputs/nvidia_smi/schema_v12/parser.go b/plugins/inputs/nvidia_smi/schema_v12/parser.go new file mode 100644 index 0000000000..8a89573fca --- /dev/null +++ b/plugins/inputs/nvidia_smi/schema_v12/parser.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package schema_v12 + +import ( + "encoding/xml" + "strconv" + "time" + + "github.com/influxdata/telegraf" + + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/nvidia_smi/common" +) + +func Parse(acc telegraf.Accumulator, buf []byte) error { + var s smi + if err := xml.Unmarshal(buf, &s); err != nil { + return err + } + + timestamp := time.Now() + if s.Timestamp != "" { + if t, err := time.ParseInLocation(time.ANSIC, s.Timestamp, time.Local); err == nil { + timestamp = t + } + } + + for i := range s.Gpu { + gpu := &s.Gpu[i] + + tags := map[string]string{ + "index": strconv.Itoa(i), + } + fields := map[string]interface{}{} + + common.SetTagIfUsed(tags, "pstate", gpu.PerformanceState) + common.SetTagIfUsed(tags, "name", gpu.ProductName) + common.SetTagIfUsed(tags, "arch", gpu.ProductArchitecture) + common.SetTagIfUsed(tags, "uuid", gpu.UUID) + common.SetTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + + common.SetIfUsed("str", fields, "driver_version", s.DriverVersion) + common.SetIfUsed("str", fields, "cuda_version", s.CudaVersion) + common.SetIfUsed("str", fields, "serial", gpu.Serial) + common.SetIfUsed("str", fields, "vbios_version", gpu.VbiosVersion) + common.SetIfUsed("str", fields, "display_active", gpu.DisplayActive) + common.SetIfUsed("str", fields, "display_mode", gpu.DisplayMode) + common.SetIfUsed("str", fields, "current_ecc", gpu.EccMode.CurrentEcc) + common.SetIfUsed("int", fields, "fan_speed", gpu.FanSpeed) + common.SetIfUsed("int", fields, "memory_total", gpu.FbMemoryUsage.Total) + common.SetIfUsed("int", fields, "memory_used", gpu.FbMemoryUsage.Used) + common.SetIfUsed("int", fields, "memory_free", gpu.FbMemoryUsage.Free) + common.SetIfUsed("int", fields, "memory_reserved", gpu.FbMemoryUsage.Reserved) + common.SetIfUsed("int", fields, "retired_pages_multiple_single_bit", gpu.RetiredPages.MultipleSingleBitRetirement.RetiredCount) + common.SetIfUsed("int", fields, "retired_pages_double_bit", 
gpu.RetiredPages.DoubleBitRetirement.RetiredCount) + common.SetIfUsed("str", fields, "retired_pages_blacklist", gpu.RetiredPages.PendingBlacklist) + common.SetIfUsed("str", fields, "retired_pages_pending", gpu.RetiredPages.PendingRetirement) + common.SetIfUsed("int", fields, "remapped_rows_correctable", gpu.RemappedRows.Correctable) + common.SetIfUsed("int", fields, "remapped_rows_uncorrectable", gpu.RemappedRows.Uncorrectable) + common.SetIfUsed("str", fields, "remapped_rows_pending", gpu.RemappedRows.Pending) + common.SetIfUsed("str", fields, "remapped_rows_failure", gpu.RemappedRows.Failure) + common.SetIfUsed("int", fields, "temperature_gpu", gpu.Temperature.GpuTemp) + common.SetIfUsed("int", fields, "utilization_gpu", gpu.Utilization.GpuUtil) + common.SetIfUsed("int", fields, "utilization_memory", gpu.Utilization.MemoryUtil) + common.SetIfUsed("int", fields, "utilization_encoder", gpu.Utilization.EncoderUtil) + common.SetIfUsed("int", fields, "utilization_decoder", gpu.Utilization.DecoderUtil) + common.SetIfUsed("int", fields, "utilization_jpeg", gpu.Utilization.JpegUtil) + common.SetIfUsed("int", fields, "utilization_ofa", gpu.Utilization.OfaUtil) + common.SetIfUsed("int", fields, "pcie_link_gen_current", gpu.Pci.PciGpuLinkInfo.PcieGen.CurrentLinkGen) + common.SetIfUsed("int", fields, "pcie_link_width_current", gpu.Pci.PciGpuLinkInfo.LinkWidths.CurrentLinkWidth) + common.SetIfUsed("int", fields, "encoder_stats_session_count", gpu.EncoderStats.SessionCount) + common.SetIfUsed("int", fields, "encoder_stats_average_fps", gpu.EncoderStats.AverageFps) + common.SetIfUsed("int", fields, "encoder_stats_average_latency", gpu.EncoderStats.AverageLatency) + common.SetIfUsed("int", fields, "fbc_stats_session_count", gpu.FbcStats.SessionCount) + common.SetIfUsed("int", fields, "fbc_stats_average_fps", gpu.FbcStats.AverageFps) + common.SetIfUsed("int", fields, "fbc_stats_average_latency", gpu.FbcStats.AverageLatency) + common.SetIfUsed("int", fields, 
"clocks_current_graphics", gpu.Clocks.GraphicsClock) + common.SetIfUsed("int", fields, "clocks_current_sm", gpu.Clocks.SmClock) + common.SetIfUsed("int", fields, "clocks_current_memory", gpu.Clocks.MemClock) + common.SetIfUsed("int", fields, "clocks_current_video", gpu.Clocks.VideoClock) + common.SetIfUsed("float", fields, "power_draw", gpu.PowerReadings.PowerDraw) + common.SetIfUsed("float", fields, "power_draw", gpu.GpuPowerReadings.PowerDraw) + common.SetIfUsed("float", fields, "module_power_draw", gpu.ModulePowerReadings.PowerDraw) + acc.AddFields("nvidia_smi", fields, tags, timestamp) + + for _, device := range gpu.MigDevices.MigDevice { + tags := map[string]string{} + common.SetTagIfUsed(tags, "index", device.Index) + common.SetTagIfUsed(tags, "gpu_index", device.GpuInstanceID) + common.SetTagIfUsed(tags, "compute_index", device.ComputeInstanceID) + common.SetTagIfUsed(tags, "pstate", gpu.PerformanceState) + common.SetTagIfUsed(tags, "name", gpu.ProductName) + common.SetTagIfUsed(tags, "arch", gpu.ProductArchitecture) + common.SetTagIfUsed(tags, "uuid", gpu.UUID) + common.SetTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + + fields := map[string]interface{}{} + common.SetIfUsed("int", fields, "sram_uncorrectable", device.EccErrorCount.VolatileCount.SramUncorrectable) + common.SetIfUsed("int", fields, "memory_fb_total", device.FbMemoryUsage.Total) + common.SetIfUsed("int", fields, "memory_fb_reserved", device.FbMemoryUsage.Reserved) + common.SetIfUsed("int", fields, "memory_fb_used", device.FbMemoryUsage.Used) + common.SetIfUsed("int", fields, "memory_fb_free", device.FbMemoryUsage.Free) + common.SetIfUsed("int", fields, "memory_bar1_total", device.Bar1MemoryUsage.Total) + common.SetIfUsed("int", fields, "memory_bar1_used", device.Bar1MemoryUsage.Used) + common.SetIfUsed("int", fields, "memory_bar1_free", device.Bar1MemoryUsage.Free) + + acc.AddFields("nvidia_smi_mig", fields, tags, timestamp) + } + } + + return nil +} diff --git 
a/plugins/inputs/nvidia_smi/schema_v12/types.go b/plugins/inputs/nvidia_smi/schema_v12/types.go new file mode 100644 index 0000000000..7eb7a6b1b5 --- /dev/null +++ b/plugins/inputs/nvidia_smi/schema_v12/types.go @@ -0,0 +1,291 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package schema_v12 + +// Generated by https://github.com/twpayne/go-xmlstruct with some type corrections. +type smi struct { + AttachedGpus string `xml:"attached_gpus"` + CudaVersion string `xml:"cuda_version"` + DriverVersion string `xml:"driver_version"` + Gpu []struct { + ID string `xml:"id,attr"` + AccountedProcesses struct{} `xml:"accounted_processes"` + AccountingMode string `xml:"accounting_mode"` + AccountingModeBufferSize string `xml:"accounting_mode_buffer_size"` + AddressingMode string `xml:"addressing_mode"` + ApplicationsClocks struct { + GraphicsClock string `xml:"graphics_clock"` + MemClock string `xml:"mem_clock"` + } `xml:"applications_clocks"` + Bar1MemoryUsage struct { + Free string `xml:"free"` + Total string `xml:"total"` + Used string `xml:"used"` + } `xml:"bar1_memory_usage"` + BoardID string `xml:"board_id"` + BoardPartNumber string `xml:"board_part_number"` + CcProtectedMemoryUsage struct { + Free string `xml:"free"` + Total string `xml:"total"` + Used string `xml:"used"` + } `xml:"cc_protected_memory_usage"` + ClockPolicy struct { + AutoBoost string `xml:"auto_boost"` + AutoBoostDefault string `xml:"auto_boost_default"` + } `xml:"clock_policy"` + Clocks struct { + GraphicsClock string `xml:"graphics_clock"` + MemClock string `xml:"mem_clock"` + SmClock string `xml:"sm_clock"` + VideoClock string `xml:"video_clock"` + } `xml:"clocks"` + ClocksEventReasons struct { + ClocksEventReasonApplicationsClocksSetting string `xml:"clocks_event_reason_applications_clocks_setting"` + ClocksEventReasonDisplayClocksSetting string `xml:"clocks_event_reason_display_clocks_setting"` + ClocksEventReasonGpuIdle string 
`xml:"clocks_event_reason_gpu_idle"` + ClocksEventReasonHwPowerBrakeSlowdown string `xml:"clocks_event_reason_hw_power_brake_slowdown"` + ClocksEventReasonHwSlowdown string `xml:"clocks_event_reason_hw_slowdown"` + ClocksEventReasonHwThermalSlowdown string `xml:"clocks_event_reason_hw_thermal_slowdown"` + ClocksEventReasonSwPowerCap string `xml:"clocks_event_reason_sw_power_cap"` + ClocksEventReasonSwThermalSlowdown string `xml:"clocks_event_reason_sw_thermal_slowdown"` + ClocksEventReasonSyncBoost string `xml:"clocks_event_reason_sync_boost"` + } `xml:"clocks_event_reasons"` + ComputeMode string `xml:"compute_mode"` + DefaultApplicationsClocks struct { + GraphicsClock string `xml:"graphics_clock"` + MemClock string `xml:"mem_clock"` + } `xml:"default_applications_clocks"` + DeferredClocks struct { + MemClock string `xml:"mem_clock"` + } `xml:"deferred_clocks"` + DisplayActive string `xml:"display_active"` + DisplayMode string `xml:"display_mode"` + DriverModel struct { + CurrentDm string `xml:"current_dm"` + PendingDm string `xml:"pending_dm"` + } `xml:"driver_model"` + EccErrors struct { + Aggregate struct { + DramCorrectable string `xml:"dram_correctable"` + DramUncorrectable string `xml:"dram_uncorrectable"` + SramCorrectable string `xml:"sram_correctable"` + SramUncorrectable string `xml:"sram_uncorrectable"` + } `xml:"aggregate"` + Volatile struct { + DramCorrectable string `xml:"dram_correctable"` + DramUncorrectable string `xml:"dram_uncorrectable"` + SramCorrectable string `xml:"sram_correctable"` + SramUncorrectable string `xml:"sram_uncorrectable"` + } `xml:"volatile"` + } `xml:"ecc_errors"` + EccMode struct { + CurrentEcc string `xml:"current_ecc"` + PendingEcc string `xml:"pending_ecc"` + } `xml:"ecc_mode"` + EncoderStats struct { + AverageFps string `xml:"average_fps"` + AverageLatency string `xml:"average_latency"` + SessionCount string `xml:"session_count"` + } `xml:"encoder_stats"` + Fabric struct { + State string `xml:"state"` + Status string 
`xml:"status"` + } `xml:"fabric"` + FanSpeed string `xml:"fan_speed"` + FbMemoryUsage struct { + Free string `xml:"free"` + Reserved string `xml:"reserved"` + Total string `xml:"total"` + Used string `xml:"used"` + } `xml:"fb_memory_usage"` + FbcStats struct { + AverageFps string `xml:"average_fps"` + AverageLatency string `xml:"average_latency"` + SessionCount string `xml:"session_count"` + } `xml:"fbc_stats"` + GpuFruPartNumber string `xml:"gpu_fru_part_number"` + GpuModuleID string `xml:"gpu_module_id"` + GpuOperationMode struct { + CurrentGom string `xml:"current_gom"` + PendingGom string `xml:"pending_gom"` + } `xml:"gpu_operation_mode"` + GpuPartNumber string `xml:"gpu_part_number"` + GpuPowerReadings struct { + CurrentPowerLimit string `xml:"current_power_limit"` + DefaultPowerLimit string `xml:"default_power_limit"` + MaxPowerLimit string `xml:"max_power_limit"` + MinPowerLimit string `xml:"min_power_limit"` + PowerDraw string `xml:"power_draw"` + PowerState string `xml:"power_state"` + RequestedPowerLimit string `xml:"requested_power_limit"` + } `xml:"gpu_power_readings"` + GpuResetStatus struct { + DrainAndResetRecommended string `xml:"drain_and_reset_recommended"` + ResetRequired string `xml:"reset_required"` + } `xml:"gpu_reset_status"` + GpuVirtualizationMode struct { + HostVgpuMode string `xml:"host_vgpu_mode"` + VirtualizationMode string `xml:"virtualization_mode"` + } `xml:"gpu_virtualization_mode"` + GspFirmwareVersion string `xml:"gsp_firmware_version"` + Ibmnpu struct { + RelaxedOrderingMode string `xml:"relaxed_ordering_mode"` + } `xml:"ibmnpu"` + InforomVersion struct { + EccObject string `xml:"ecc_object"` + ImgVersion string `xml:"img_version"` + OemObject string `xml:"oem_object"` + PwrObject string `xml:"pwr_object"` + } `xml:"inforom_version"` + MaxClocks struct { + GraphicsClock string `xml:"graphics_clock"` + MemClock string `xml:"mem_clock"` + SmClock string `xml:"sm_clock"` + VideoClock string `xml:"video_clock"` + } `xml:"max_clocks"` 
+ MaxCustomerBoostClocks struct { + GraphicsClock string `xml:"graphics_clock"` + } `xml:"max_customer_boost_clocks"` + MigDevices struct { + MigDevice []struct { + Index string `xml:"index"` + GpuInstanceID string `xml:"gpu_instance_id"` + ComputeInstanceID string `xml:"compute_instance_id"` + EccErrorCount struct { + Text string `xml:",chardata" json:"text"` + VolatileCount struct { + SramUncorrectable string `xml:"sram_uncorrectable"` + } `xml:"volatile_count" json:"volatile_count"` + } `xml:"ecc_error_count" json:"ecc_error_count"` + FbMemoryUsage struct { + Total string `xml:"total"` + Reserved string `xml:"reserved"` + Used string `xml:"used"` + Free string `xml:"free"` + } `xml:"fb_memory_usage" json:"fb_memory_usage"` + Bar1MemoryUsage struct { + Total string `xml:"total"` + Used string `xml:"used"` + Free string `xml:"free"` + } `xml:"bar1_memory_usage" json:"bar1_memory_usage"` + } `xml:"mig_device" json:"mig_device"` + } `xml:"mig_devices" json:"mig_devices"` + MigMode struct { + CurrentMig string `xml:"current_mig"` + PendingMig string `xml:"pending_mig"` + } `xml:"mig_mode"` + MinorNumber string `xml:"minor_number"` + ModulePowerReadings struct { + CurrentPowerLimit string `xml:"current_power_limit"` + DefaultPowerLimit string `xml:"default_power_limit"` + MaxPowerLimit string `xml:"max_power_limit"` + MinPowerLimit string `xml:"min_power_limit"` + PowerDraw string `xml:"power_draw"` + PowerState string `xml:"power_state"` + RequestedPowerLimit string `xml:"requested_power_limit"` + } `xml:"module_power_readings"` + MultigpuBoard string `xml:"multigpu_board"` + Pci struct { + AtomicCapsInbound string `xml:"atomic_caps_inbound"` + AtomicCapsOutbound string `xml:"atomic_caps_outbound"` + PciBridgeChip struct { + BridgeChipFw string `xml:"bridge_chip_fw"` + BridgeChipType string `xml:"bridge_chip_type"` + } `xml:"pci_bridge_chip"` + PciBus string `xml:"pci_bus"` + PciBusID string `xml:"pci_bus_id"` + PciDevice string `xml:"pci_device"` + PciDeviceID 
string `xml:"pci_device_id"` + PciDomain string `xml:"pci_domain"` + PciGpuLinkInfo struct { + LinkWidths struct { + CurrentLinkWidth string `xml:"current_link_width"` + MaxLinkWidth string `xml:"max_link_width"` + } `xml:"link_widths"` + PcieGen struct { + CurrentLinkGen string `xml:"current_link_gen"` + DeviceCurrentLinkGen string `xml:"device_current_link_gen"` + MaxDeviceLinkGen string `xml:"max_device_link_gen"` + MaxHostLinkGen string `xml:"max_host_link_gen"` + MaxLinkGen string `xml:"max_link_gen"` + } `xml:"pcie_gen"` + } `xml:"pci_gpu_link_info"` + PciSubSystemID string `xml:"pci_sub_system_id"` + ReplayCounter string `xml:"replay_counter"` + ReplayRolloverCounter string `xml:"replay_rollover_counter"` + RxUtil string `xml:"rx_util"` + TxUtil string `xml:"tx_util"` + } `xml:"pci"` + PerformanceState string `xml:"performance_state"` + PersistenceMode string `xml:"persistence_mode"` + PowerReadings struct { + PowerState string `xml:"power_state"` + PowerManagement string `xml:"power_management"` + PowerDraw string `xml:"power_draw"` + PowerLimit string `xml:"power_limit"` + DefaultPowerLimit string `xml:"default_power_limit"` + EnforcedPowerLimit string `xml:"enforced_power_limit"` + MinPowerLimit string `xml:"min_power_limit"` + MaxPowerLimit string `xml:"max_power_limit"` + } `xml:"power_readings"` + Processes struct{} `xml:"processes"` + ProductArchitecture string `xml:"product_architecture"` + ProductBrand string `xml:"product_brand"` + ProductName string `xml:"product_name"` + RemappedRows struct { + // Manually added + Correctable string `xml:"remapped_row_corr"` + Uncorrectable string `xml:"remapped_row_unc"` + Pending string `xml:"remapped_row_pending"` + Failure string `xml:"remapped_row_failure"` + } `xml:"remapped_rows"` + RetiredPages struct { + DoubleBitRetirement struct { + RetiredCount string `xml:"retired_count"` + RetiredPagelist string `xml:"retired_pagelist"` + } `xml:"double_bit_retirement"` + MultipleSingleBitRetirement struct { + 
RetiredCount string `xml:"retired_count"` + RetiredPagelist string `xml:"retired_pagelist"` + } `xml:"multiple_single_bit_retirement"` + PendingBlacklist string `xml:"pending_blacklist"` + PendingRetirement string `xml:"pending_retirement"` + } `xml:"retired_pages"` + Serial string `xml:"serial"` + SupportedClocks struct { + SupportedMemClock []struct { + SupportedGraphicsClock []string `xml:"supported_graphics_clock"` + Value string `xml:"value"` + } `xml:"supported_mem_clock"` + } `xml:"supported_clocks"` + SupportedGpuTargetTemp struct { + GpuTargetTempMax string `xml:"gpu_target_temp_max"` + GpuTargetTempMin string `xml:"gpu_target_temp_min"` + } `xml:"supported_gpu_target_temp"` + Temperature struct { + GpuTargetTemperature string `xml:"gpu_target_temperature"` + GpuTemp string `xml:"gpu_temp"` + GpuTempMaxGpuThreshold string `xml:"gpu_temp_max_gpu_threshold"` + GpuTempMaxMemThreshold string `xml:"gpu_temp_max_mem_threshold"` + GpuTempMaxThreshold string `xml:"gpu_temp_max_threshold"` + GpuTempSlowThreshold string `xml:"gpu_temp_slow_threshold"` + GpuTempTlimit string `xml:"gpu_temp_tlimit"` + MemoryTemp string `xml:"memory_temp"` + } `xml:"temperature"` + Utilization struct { + DecoderUtil string `xml:"decoder_util"` + EncoderUtil string `xml:"encoder_util"` + GpuUtil string `xml:"gpu_util"` + JpegUtil string `xml:"jpeg_util"` + MemoryUtil string `xml:"memory_util"` + OfaUtil string `xml:"ofa_util"` + } `xml:"utilization"` + UUID string `xml:"uuid"` + VbiosVersion string `xml:"vbios_version"` + Voltage struct { + GraphicsVolt string `xml:"graphics_volt"` + } `xml:"voltage"` + } `xml:"gpu"` + Timestamp string `xml:"timestamp"` +} diff --git a/plugins/inputs/nvidia_smi/testdata/a100-sxm4-v12.xml b/plugins/inputs/nvidia_smi/testdata/a100-sxm4-v12.xml new file mode 100644 index 0000000000..29bf40cae9 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/a100-sxm4-v12.xml @@ -0,0 +1,452 @@ + + + + Fri Aug 4 11:44:30 2023 + 535.54.03 + 12.2 + 4 + + NVIDIA 
A100-SXM4-80GB + NVIDIA + Ampere + Enabled + Disabled + Disabled + None + + Enabled + Enabled + + + + 0 + 3 + 0 + + + 14 + 1 + 0 + 1 + 0 + 0 + + + + + 0 + + + + 19968 MiB + 0 MiB + 12 MiB + 19955 MiB + + + 32767 MiB + 0 MiB + 32767 MiB + + + + 1 + 4 + 0 + + + 14 + 1 + 0 + 1 + 0 + 0 + + + + + 0 + + + + 19968 MiB + 0 MiB + 12 MiB + 19955 MiB + + + 32767 MiB + 0 MiB + 32767 MiB + + + + 2 + 5 + 0 + + + 14 + 1 + 0 + 1 + 0 + 0 + + + + + 0 + + + + 19968 MiB + 0 MiB + 12 MiB + 19955 MiB + + + 32767 MiB + 0 MiB + 32767 MiB + + + + 3 + 6 + 0 + + + 14 + 1 + 0 + 1 + 0 + 0 + + + + + 0 + + + + 19968 MiB + 0 MiB + 12 MiB + 19955 MiB + + + 32767 MiB + 0 MiB + 32767 MiB + + + + Disabled + 4000 + + N/A + N/A + + 1650522003820 + GPU-513536b6-7d19-9063-b049-1e69664bb298 + 1 + 92.00.36.00.02 + No + 0x100 + 692-2G506-0212-002 + 20B2-895-A1 + N/A + 4 + + G506.0212.00.01 + 2.0 + 6.16 + N/A + + + N/A + N/A + + 535.54.03 + + None + N/A + + + No + No + + + N/A + + + 01 + 00 + 0000 + 20B210DE + 00000000:01:00.0 + 147F10DE + + + 4 + 4 + 4 + 4 + 4 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 4000 KB/s + 0 KB/s + N/A + N/A + + N/A + P0 + + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 81920 MiB + 869 MiB + 50 MiB + 80999 MiB + + + 131072 MiB + 1 MiB + 131071 MiB + + + 0 MiB + 0 MiB + 0 MiB + + Default + + N/A + N/A + N/A + N/A + N/A + N/A + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + Enabled + Enabled + + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + + + N/A + N/A + + + N/A + N/A + + N/A + N/A + + N/A + + 27 C + N/A + 92 C + 89 C + 85 C + N/A + 44 C + 95 C + + + N/A + N/A + + + P0 + 67.03 W + 500.00 W + 500.00 W + 500.00 W + 100.00 W + 500.00 W + + + P0 + N/A + N/A + N/A + N/A + N/A + N/A + + + 1275 MHz + 1275 MHz + 1593 MHz + 1275 MHz + + + 1275 MHz + 1593 MHz + + + 1275 MHz + 1593 MHz + + + N/A + + + 1410 MHz + 1410 MHz + 1593 MHz + 1290 MHz + + + 1410 MHz + + + N/A + N/A + + + 912.500 mV + + + N/A + N/A + + + + 1593 MHz + 1410 
MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/a10g.xml b/plugins/inputs/nvidia_smi/testdata/a10g.xml new file mode 100644 index 0000000000..77f3c17c8b --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/a10g.xml @@ -0,0 +1,355 @@ + + + + Mon Apr 24 16:11:51 2023 + 515.105.01 + 11.7 + 1 + + NVIDIA A10G + NVIDIA + Ampere + Disabled + Disabled + Disabled + + N/A + N/A + + + None + + Disabled + 4000 + + N/A + N/A + + 0000000000000 + GPU-9a9a6c50-2a47-2f51-a902-b82c3b127e94 + 0 + 94.02.75.00.01 + No + 0x1e + 000-00000-0000-000 + 0 + + G133.0210.00.04 + 2.0 + 6.16 + N/A + + + N/A + N/A + + 515.105.01 + + Pass-Through + N/A + + + N/A + + + 00 + 1E + 0000 + 000000DE + 00000000:00:1E.0 + 000000DE + + + 4 + 1 + + + 16x + 8x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 0 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 23028 MiB + 435 MiB + 22 MiB + 22569 MiB + + + 32768 MiB + 1 MiB + 32767 MiB + + Default + + 0 % + 0 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + Enabled + Enabled + + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + + + N/A + N/A + + + N/A + 
N/A + + N/A + N/A + + + 0 + 0 + No + No + + 192 bank(s) + 0 bank(s) + 0 bank(s) + 0 bank(s) + 0 bank(s) + + + + 17 C + 98 C + 95 C + 88 C + N/A + N/A + N/A + + + N/A + N/A + + + P8 + Supported + 25.58 W + 300.00 W + 300.00 W + 300.00 W + 100.00 W + 300.00 W + + + 210 MHz + 210 MHz + 405 MHz + 555 MHz + + + 1710 MHz + 6251 MHz + + + 1710 MHz + 6251 MHz + + + 1710 MHz + 1710 MHz + 6251 MHz + 1500 MHz + + + 1710 MHz + + + N/A + N/A + + + 693.750 mV + + + + 6251 MHz + 1710 MHz + 1695 MHz + 1680 MHz + 1665 MHz + 1650 MHz + 1635 MHz + 1620 MHz + 1605 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + 405 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + + + N/A + N/A + 725 + G + /usr/lib/xorg/Xorg + 22 MiB + + + + + + + \ No newline at end of file diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml new file mode 100644 index 0000000000..3e3e3ec870 --- /dev/null +++ 
b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml @@ -0,0 +1,47 @@ + + + + + GeForce GTX 1070 Ti + GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 + + + + 1 + + + 16x + + + + 100 % + P8 + + 4096 MiB + 42 MiB + 4054 MiB + + Default + + 0 % + 0 % + + + 0 + 0 + 0 + + + 39 C + + + N/A + + + 135 MHz + 135 MHz + 405 MHz + 405 MHz + + + diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml new file mode 100644 index 0000000000..1a6c7d0891 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml @@ -0,0 +1,189 @@ + + + Fri Mar 29 19:19:44 2019 + 418.43 + 10.1 + 1 + + Graphics Device + GeForce + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + N/A + GPU-304a277d-3545-63b8-3a36-dfde3c992989 + 0 + 90.16.25.00.4C + No + 0x4300 + N/A + + G001.0000.02.04 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 218410DE + 00000000:43:00.0 + 3FC81458 + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 0 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 5912 MiB + 0 MiB + 5912 MiB + + + 256 MiB + 2 MiB + 254 MiB + + Default + + 0 % + 1 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 40 C + 96 C + 93 C + 91 C + N/A + N/A + + + P8 + Supported + 8.93 W + 130.00 W + 130.00 W + 130.00 W + 70.00 W + 130.00 W + + + 300 MHz + 300 MHz + 405 MHz + 540 MHz + + + N/A + N/A + + + N/A + N/A + + + 2145 MHz + 2145 MHz + 4001 MHz + 1950 MHz + + + N/A + + + N/A + N/A + + N/A + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/quadro-p2000-v12.xml b/plugins/inputs/nvidia_smi/testdata/quadro-p2000-v12.xml new file mode 100644 index 0000000000..669113e7c3 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/quadro-p2000-v12.xml @@ -0,0 +1,558 @@ + + + + Wed 
Sep 20 09:37:38 2023 + 525.125.06 + 12.0 + 1 + + Quadro P2000 + Quadro + Pascal + Disabled + Disabled + Enabled + + N/A + N/A + + + None + + Disabled + 4000 + + N/A + N/A + + 0322218049033 + GPU-396caaed-39ca-3199-2e68-717cdb786ec6 + 0 + 86.06.3F.00.30 + No + 0x1700 + 900-5G410-0100-000 + 1C30-875-A1 + 1 + + G410.0502.00.02 + 1.1 + N/A + N/A + + + N/A + N/A + + N/A + + None + N/A + + + N/A + + + 17 + 00 + 0000 + 1C3010DE + 00000000:17:00.0 + 11B31028 + + + 3 + 1 + 1 + 3 + 3 + + + 16x + 8x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + N/A + N/A + + 46 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 5120 MiB + 66 MiB + 1 MiB + 5051 MiB + + + 256 MiB + 5 MiB + 251 MiB + + Default + + 0 % + 0 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + + N/A + N/A + + + N/A + N/A + + N/A + N/A + + N/A + + 34 C + N/A + 104 C + 101 C + N/A + 83 C + N/A + N/A + + + 65 C + 98 C + + + P8 + Supported + 4.61 W + 75.00 W + 75.00 W + 75.00 W + 75.00 W + 75.00 W + + + 139 MHz + 139 MHz + 405 MHz + 544 MHz + + + 1075 MHz + 3504 MHz + + + 1075 MHz + 3504 MHz + + + N/A + + + 1721 MHz + 1721 MHz + 3504 MHz + 1556 MHz + + + 1721 MHz + + + N/A + N/A + + + N/A + + + N/A + N/A + + + + 3504 MHz + 1721 MHz + 1708 MHz + 1695 MHz + 1683 MHz + 1670 MHz + 1657 MHz + 1645 MHz + 1632 MHz + 1620 MHz + 1607 MHz + 1594 MHz + 1582 MHz + 1569 MHz + 1556 MHz + 1544 MHz + 1531 MHz + 1518 MHz + 1506 MHz + 1493 MHz + 1480 MHz + 1468 MHz + 1455 MHz + 1442 MHz + 1430 MHz + 1417 MHz + 1404 MHz + 1392 MHz + 1379 MHz + 1366 MHz + 1354 MHz + 1341 MHz + 1328 MHz + 1316 MHz + 1303 MHz + 1290 MHz + 1278 MHz + 1265 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 
1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 810 MHz + 1721 MHz + 1708 MHz + 1695 MHz + 1683 MHz + 1670 MHz + 1657 MHz + 1645 MHz + 1632 MHz + 1620 MHz + 1607 MHz + 1594 MHz + 1582 MHz + 1569 MHz + 1556 MHz + 1544 MHz + 1531 MHz + 1518 MHz + 1506 MHz + 1493 MHz + 1480 MHz + 1468 MHz + 1455 MHz + 1442 MHz + 1430 MHz + 1417 MHz + 1404 MHz + 1392 MHz + 1379 MHz + 1366 MHz + 1354 MHz + 1341 MHz + 1328 MHz + 1316 MHz + 1303 MHz + 1290 MHz + 1278 MHz + 1265 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 
MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 405 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml new file mode 100644 index 0000000000..ca9e2191ec --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml @@ -0,0 +1,447 @@ + + + Mon Mar 11 17:03:27 2019 + 418.43 + 10.1 + 1 + + Quadro P400 + Quadro + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + 0424418054852 + GPU-8f750be4-dfbc-23b9-b33f-da729a536494 + 0 + 86.07.3B.00.4A + No + 0x4300 + 900-5G212-1701-000 + + G212.0500.00.01 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 1CB310DE + 00000000:43:00.0 + 11BE10DE + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 34 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 1998 MiB + 0 MiB + 1998 MiB + + + 256 MiB + 2 MiB + 254 MiB + + Default + + 0 % + 3 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 33 C + 103 C + 100 C + N/A + N/A + N/A + + + P8 + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + 139 MHz + 139 MHz + 405 MHz + 544 MHz + + + 1227 MHz + 2005 MHz 
+ + + 1227 MHz + 2005 MHz + + + 1252 MHz + 1252 MHz + 2005 MHz + 1126 MHz + + + 1252 MHz + + + N/A + N/A + + + + 2005 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 810 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 405 MHz + 607 MHz + 594 MHz + 582 MHz 
+ 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/rtx-3080-v12.xml b/plugins/inputs/nvidia_smi/testdata/rtx-3080-v12.xml new file mode 100644 index 0000000000..e427481ca6 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/rtx-3080-v12.xml @@ -0,0 +1,786 @@ + + + + Thu Jul 20 17:00:50 2023 + 536.40 + 12.2 + 1 + + NVIDIA GeForce RTX 3080 + GeForce + Ampere + Enabled + Enabled + N/A + N/A + + N/A + N/A + + + None + + Disabled + 4000 + + WDDM + WDDM + + N/A + GPU-19d6d965-2acc-f646-00f8-4c76979aabb4 + N/A + 94.02.71.40.72 + No + 0x400 + N/A + 2216-202-A1 + N/A + 1 + + G001.0000.03.03 + 2.0 + N/A + N/A + + + N/A + N/A + + N/A + + Pass-Through + N/A + + + No + N/A + + + N/A + + + 04 + 00 + 0000 + 221610DE + 00000000:04:00.0 + 161219DA + + + 4 + 4 + 4 + 4 + N/A + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 1000 KB/s + 6000 KB/s + N/A + N/A + + 0 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 10240 MiB + 173 MiB + 1128 MiB + 8938 MiB + + + 16384 MiB + 1 MiB + 16383 MiB + + + N/A + N/A + N/A + + Default + + 0 % + 37 % + 0 % + 0 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + + + + + N/A + N/A + + + N/A + N/A + + N/A + N/A + + N/A + + 31 C + N/A + 98 C + 95 C + 93 C + 91 C + N/A + N/A + + + 65 C + 91 C + + + P8 + 22.78 W + 336.00 W + 336.00 W + 320.00 W + 100.00 W + 336.00 W + + + P8 + N/A + N/A + N/A + N/A + N/A + N/A + + + 210 MHz + 210 MHz + 405 MHz + 555 MHz + + + N/A + N/A + + + N/A + N/A + + + N/A + + + 2100 MHz + 2100 MHz + 9501 MHz + 1950 MHz + + + N/A 
+ + + N/A + N/A + + + 750.000 mV + + + N/A + N/A + + + + 9501 MHz + 2100 MHz + 2085 MHz + 2070 MHz + 2055 MHz + 2040 MHz + 2025 MHz + 2010 MHz + 1995 MHz + 1980 MHz + 1965 MHz + 1950 MHz + 1935 MHz + 1920 MHz + 1905 MHz + 1890 MHz + 1875 MHz + 1860 MHz + 1845 MHz + 1830 MHz + 1815 MHz + 1800 MHz + 1785 MHz + 1770 MHz + 1755 MHz + 1740 MHz + 1725 MHz + 1710 MHz + 1695 MHz + 1680 MHz + 1665 MHz + 1650 MHz + 1635 MHz + 1620 MHz + 1605 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + 9251 MHz + 2100 MHz + 2085 MHz + 2070 MHz + 2055 MHz + 2040 MHz + 2025 MHz + 2010 MHz + 1995 MHz + 1980 MHz + 1965 MHz + 1950 MHz + 1935 MHz + 1920 MHz + 1905 MHz + 1890 MHz + 1875 MHz + 1860 MHz + 1845 MHz + 1830 MHz + 1815 MHz + 1800 MHz + 1785 MHz + 1770 MHz + 1755 MHz + 1740 MHz + 1725 MHz + 1710 MHz + 1695 MHz + 1680 MHz + 1665 MHz + 1650 MHz + 1635 MHz + 1620 MHz + 1605 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 
1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + 5001 MHz + 2100 MHz + 2085 MHz + 2070 MHz + 2055 MHz + 2040 MHz + 2025 MHz + 2010 MHz + 1995 MHz + 1980 MHz + 1965 MHz + 1950 MHz + 1935 MHz + 1920 MHz + 1905 MHz + 1890 MHz + 1875 MHz + 1860 MHz + 1845 MHz + 1830 MHz + 1815 MHz + 1800 MHz + 1785 MHz + 1770 MHz + 1755 MHz + 1740 MHz + 1725 MHz + 1710 MHz + 1695 MHz + 1680 MHz + 1665 MHz + 1650 MHz + 1635 MHz + 1620 MHz + 1605 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz 
+ 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + 810 MHz + 2100 MHz + 2085 MHz + 2070 MHz + 2055 MHz + 2040 MHz + 2025 MHz + 2010 MHz + 1995 MHz + 1980 MHz + 1965 MHz + 1950 MHz + 1935 MHz + 1920 MHz + 1905 MHz + 1890 MHz + 1875 MHz + 1860 MHz + 1845 MHz + 1830 MHz + 1815 MHz + 1800 MHz + 1785 MHz + 1770 MHz + 1755 MHz + 1740 MHz + 1725 MHz + 1710 MHz + 1695 MHz + 1680 MHz + 1665 MHz + 1650 MHz + 1635 MHz + 1620 MHz + 1605 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + 405 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + 285 MHz + 270 MHz + 255 MHz + 240 MHz + 225 MHz + 210 MHz + + + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/tesla-t4.xml b/plugins/inputs/nvidia_smi/testdata/tesla-t4.xml new file mode 100644 index 0000000000..3a4e962ca8 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/tesla-t4.xml @@ -0,0 +1,348 @@ + + + + Mon Apr 24 16:22:39 2023 + 515.105.01 + 11.7 + 1 + + Tesla T4 + NVIDIA + 
Turing + Disabled + Disabled + Disabled + + N/A + N/A + + + None + + Disabled + 4000 + + N/A + N/A + + 0000000000000 + GPU-d37e67a5-91dd-3774-a5cb-99096249601a + 0 + 90.04.84.00.06 + No + 0x1e + 900-2G183-0000-001 + 0 + + G183.0200.00.02 + 1.1 + 5.0 + N/A + + + N/A + N/A + + 515.105.01 + + Pass-Through + N/A + + + N/A + + + 00 + 1E + 0000 + 1EB810DE + 00000000:00:1E.0 + 12A210DE + + + 3 + 3 + + + 16x + 8x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + N/A + P0 + + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 15360 MiB + 388 MiB + 1032 MiB + 13939 MiB + + + 256 MiB + 5 MiB + 251 MiB + + Default + + 0 % + 0 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + Enabled + Enabled + + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + + + 0 + + + + + 0 + + + + No + No + + N/A + + 40 C + 96 C + 93 C + 85 C + N/A + N/A + N/A + + + N/A + N/A + + + P0 + Supported + 26.78 W + 70.00 W + 70.00 W + 70.00 W + 60.00 W + 70.00 W + + + 585 MHz + 585 MHz + 5000 MHz + 810 MHz + + + 585 MHz + 5001 MHz + + + 585 MHz + 5001 MHz + + + 1590 MHz + 1590 MHz + 5001 MHz + 1470 MHz + + + 1590 MHz + + + N/A + N/A + + + N/A + + + + 5001 MHz + 1590 MHz + 1575 MHz + 1560 MHz + 1545 MHz + 1530 MHz + 1515 MHz + 1500 MHz + 1485 MHz + 1470 MHz + 1455 MHz + 1440 MHz + 1425 MHz + 1410 MHz + 1395 MHz + 1380 MHz + 1365 MHz + 1350 MHz + 1335 MHz + 1320 MHz + 1305 MHz + 1290 MHz + 1275 MHz + 1260 MHz + 1245 MHz + 1230 MHz + 1215 MHz + 1200 MHz + 1185 MHz + 1170 MHz + 1155 MHz + 1140 MHz + 1125 MHz + 1110 MHz + 1095 MHz + 1080 MHz + 1065 MHz + 1050 MHz + 1035 MHz + 1020 MHz + 1005 MHz + 990 MHz + 975 MHz + 960 MHz + 945 MHz + 930 MHz + 915 MHz + 900 MHz + 885 MHz + 870 MHz + 855 MHz + 840 MHz + 825 MHz + 810 MHz + 795 MHz + 780 MHz + 765 MHz + 750 MHz + 735 MHz + 720 MHz + 705 MHz + 690 MHz + 675 MHz + 660 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 
450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + + + 405 MHz + 645 MHz + 630 MHz + 615 MHz + 600 MHz + 585 MHz + 570 MHz + 555 MHz + 540 MHz + 525 MHz + 510 MHz + 495 MHz + 480 MHz + 465 MHz + 450 MHz + 435 MHz + 420 MHz + 405 MHz + 390 MHz + 375 MHz + 360 MHz + 345 MHz + 330 MHz + 315 MHz + 300 MHz + + + + + N/A + N/A + 675 + G + /usr/lib/xorg/Xorg + 22 MiB + + + N/A + N/A + 5762 + C + python + 1005 MiB + + + + + + + \ No newline at end of file diff --git a/plugins/inputs/prometheus/metric_type_handler_test.go b/plugins/inputs/prometheus/metric_type_handler_test.go index 05f1c29bfa..32062878cf 100644 --- a/plugins/inputs/prometheus/metric_type_handler_test.go +++ b/plugins/inputs/prometheus/metric_type_handler_test.go @@ -9,7 +9,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +16,7 @@ import ( type mockMetricMetadataStore struct { MetricList []string - Type textparse.MetricType + Type model.MetricType Help string Unit string } @@ -76,7 +75,7 @@ func (ms *mockScrapeManager) TargetsAll() map[string][]*scrape.Target { target1 := scrape.NewTarget(labels1, labels1, params) mStore1 := &mockMetricMetadataStore{ MetricList: []string{"m1", "m2", "m4"}, - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "", Unit: "", } @@ -96,7 +95,7 @@ func (ms *mockScrapeManager) TargetsAll() map[string][]*scrape.Target { target2 := scrape.NewTarget(labels2, labels2, params2) mStore2 := &mockMetricMetadataStore{ MetricList: []string{"m1", "m2"}, - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "", Unit: "", } @@ -135,7 +134,7 @@ func TestMetadataServiceImpl_GetWithOriginalJobname(t *testing.T) { require.NoError(t, err) expectedMetricMetadata := 
scrape.MetricMetadata{ Metric: "m1", - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "", Unit: "", } @@ -145,7 +144,7 @@ func TestMetadataServiceImpl_GetWithOriginalJobname(t *testing.T) { expectedMetricMetadata = scrape.MetricMetadata{ Metric: "m2", - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "", Unit: "", } @@ -219,16 +218,16 @@ func TestNewMetricsTypeHandler_HandleWithNormalTarget(t *testing.T) { metricNameBeforeRelabel: "m1", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } expectedMetric2 := PrometheusMetric{ metricName: "m2", metricNameBeforeRelabel: "m2", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } assert.Equal(t, *result[0], expectedMetric1) assert.Equal(t, *result[1], expectedMetric2) @@ -269,16 +268,16 @@ func TestNewMetricsTypeHandler_HandleWithReplacedJobname(t *testing.T) { metricNameBeforeRelabel: "m1", jobBeforeRelabel: "job2", instanceBeforeRelabel: "instance2", - metricType: string(textparse.MetricTypeGauge), - tags: map[string]string{"job": "job2_replaced", "instance": "instance2_replaced", "prom_metric_type": string(textparse.MetricTypeGauge)}, + metricType: string(model.MetricTypeGauge), + tags: map[string]string{"job": "job2_replaced", "instance": "instance2_replaced", 
"prom_metric_type": string(model.MetricTypeGauge)}, } expectedMetric2 := PrometheusMetric{ metricName: "m2", metricNameBeforeRelabel: "m2", jobBeforeRelabel: "job2", instanceBeforeRelabel: "instance2", - metricType: string(textparse.MetricTypeGauge), - tags: map[string]string{"job": "job2_replaced", "instance": "instance2", "prom_metric_type": string(textparse.MetricTypeGauge)}, + metricType: string(model.MetricTypeGauge), + tags: map[string]string{"job": "job2_replaced", "instance": "instance2", "prom_metric_type": string(model.MetricTypeGauge)}, } assert.Equal(t, *result[0], expectedMetric1) assert.Equal(t, *result[1], expectedMetric2) @@ -325,24 +324,24 @@ func TestNewMetricsTypeHandler_HandleWithMetricSuffix(t *testing.T) { metricNameBeforeRelabel: "m1_sum", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } expectedMetric2 := PrometheusMetric{ metricName: "m2_count", metricNameBeforeRelabel: "m2_count", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } expectedMetric4 := PrometheusMetric{ metricName: "m4_total", metricNameBeforeRelabel: "m4_total", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": 
string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } assert.Equal(t, *result[0], expectedMetric1) assert.Equal(t, *result[1], expectedMetric2) @@ -382,19 +381,19 @@ func TestNewMetricsTypeHandler_HandleRelabelName(t *testing.T) { expectedMetric1 := PrometheusMetric{ metricName: "m1", metricNameBeforeRelabel: "m1", - metricType: string(textparse.MetricTypeCounter), + metricType: string(model.MetricTypeCounter), jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", // The saved label should be gone - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } expectedMetric2 := PrometheusMetric{ metricName: "m2_changed", metricNameBeforeRelabel: "m2", jobBeforeRelabel: "job1", instanceBeforeRelabel: "instance1", - metricType: string(textparse.MetricTypeCounter), - tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(textparse.MetricTypeCounter)}, + metricType: string(model.MetricTypeCounter), + tags: map[string]string{"job": "job1", "instance": "instance1", "prom_metric_type": string(model.MetricTypeCounter)}, } assert.Equal(t, expectedMetric1, *result[0]) assert.Equal(t, expectedMetric2, *result[1]) diff --git a/plugins/inputs/prometheus/metrics_receiver.go b/plugins/inputs/prometheus/metrics_receiver.go index aff1d10b1b..0a89f1c6d3 100644 --- a/plugins/inputs/prometheus/metrics_receiver.go +++ b/plugins/inputs/prometheus/metrics_receiver.go @@ -48,6 +48,11 @@ type metricAppender struct { batch PrometheusMetricBatch } +func (m *metricAppender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + // TODO: implement this func + return 0, nil +} + 
func (mr *metricsReceiver) Appender(ctx context.Context) storage.Appender { return &metricAppender{receiver: mr, batch: PrometheusMetricBatch{}} } diff --git a/plugins/inputs/prometheus/metrics_type_handler.go b/plugins/inputs/prometheus/metrics_type_handler.go index 19a96842e6..f003461c71 100644 --- a/plugins/inputs/prometheus/metrics_type_handler.go +++ b/plugins/inputs/prometheus/metrics_type_handler.go @@ -9,7 +9,7 @@ import ( "log" "strings" - "github.com/prometheus/prometheus/model/textparse" + v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/prometheus/scrape" ) @@ -40,19 +40,19 @@ func normalizeMetricName(name string, suffixes []string) string { } func (pm *PrometheusMetric) isCounter() bool { - return pm.metricType == string(textparse.MetricTypeCounter) + return pm.metricType == string(v1.MetricTypeCounter) } func (pm *PrometheusMetric) isGauge() bool { - return pm.metricType == string(textparse.MetricTypeGauge) + return pm.metricType == string(v1.MetricTypeGauge) } func (pm *PrometheusMetric) isHistogram() bool { - return pm.metricType == string(textparse.MetricTypeHistogram) + return pm.metricType == string(v1.MetricTypeHistogram) } func (pm *PrometheusMetric) isSummary() bool { - return pm.metricType == string(textparse.MetricTypeSummary) + return pm.metricType == string(v1.MetricTypeSummary) } // Adapter to prometheus scrape.Target @@ -66,7 +66,7 @@ type mCache struct { } func (m *mCache) Metadata(metricName string) (scrape.MetricMetadata, bool) { - return m.t.Metadata(metricName) + return m.t.GetMetadata(metricName) } // Adapter to ScrapeManager to retrieve the cache by job and instance @@ -89,7 +89,7 @@ func (t *metadataServiceImpl) Get(job, instance string) (metadataCache, error) { // from the same targetGroup, instance is not going to be duplicated for _, target := range targetGroup { - if target.Labels().Get(savedScrapeInstanceLabel) == instance { + if target.DiscoveredLabels().Get(savedScrapeInstanceLabel) == 
instance || target.DiscoveredLabels().Get(scrapeInstanceLabel) == instance { return &mCache{target}, nil } } diff --git a/plugins/inputs/prometheus/start.go b/plugins/inputs/prometheus/start.go index 7a0021a1bf..d5b81e1fe5 100644 --- a/plugins/inputs/prometheus/start.go +++ b/plugins/inputs/prometheus/start.go @@ -32,6 +32,7 @@ import ( "github.com/oklog/run" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + v "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" "github.com/prometheus/common/promlog" "github.com/prometheus/common/version" @@ -75,7 +76,7 @@ var ( ) func init() { - prometheus.MustRegister(version.NewCollector("prometheus")) + prometheus.MustRegister(v.NewCollector("prometheus")) } func Start(configFilePath string, receiver storage.Appendable, shutDownChan chan interface{}, wg *sync.WaitGroup, mth *metricsTypeHandler) { @@ -113,8 +114,9 @@ func Start(configFilePath string, receiver storage.Appendable, shutDownChan chan var ( ctxScrape, cancelScrape = context.WithCancel(context.Background()) - discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape")) - scrapeManager = scrape.NewManager(&scrape.Options{}, log.With(logger, "component", "scrape manager"), receiver) + sdMetrics, _ = discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer) + discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + scrapeManager, _ = scrape.NewManager(&scrape.Options{}, log.With(logger, "component", "scrape manager"), receiver, prometheus.DefaultRegisterer) ) mth.SetScrapeManager(scrapeManager) @@ -282,6 +284,7 @@ func Start(configFilePath string, receiver storage.Appendable, shutDownChan chan const ( savedScrapeJobLabel = "cwagent_saved_scrape_job" savedScrapeInstanceLabel = 
"cwagent_saved_scrape_instance" + scrapeInstanceLabel = "__address__" savedScrapeNameLabel = "cwagent_saved_scrape_name" // just arbitrary name that end user won't override in relabel config ) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index e936a72622..4cc9a4276d 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -27,7 +27,6 @@ import ( "golang.org/x/exp/maps" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" "github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal/publisher" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" @@ -97,8 +96,6 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { Filename: c.config.SharedCredentialFilename, Token: c.config.Token, } - provider.GetFlagsStats().SetFlagWithValue(provider.FlagRegionType, c.config.RegionType) - provider.GetFlagsStats().SetFlagWithValue(provider.FlagMode, c.config.Mode) configProvider := credentialConfig.Credentials() logger := models.NewLogger("outputs", "cloudwatch", "") logThrottleRetryer := retryer.NewLogThrottleRetryer(logger) diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 79aa8a5c25..b3aada3801 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -501,7 +501,8 @@ func TestPublish(t *testing.T) { func TestMiddleware(t *testing.T) { t.Setenv("AWS_ACCESS_KEY_ID", "test") t.Setenv("AWS_SECRET_ACCESS_KEY", "test") - id := component.NewID("test") + newType, _ := component.NewType("test") + id := component.NewID(newType) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) })) diff --git a/plugins/outputs/cloudwatch/config.go 
b/plugins/outputs/cloudwatch/config.go index 43469bc9d5..74d4d9b097 100644 --- a/plugins/outputs/cloudwatch/config.go +++ b/plugins/outputs/cloudwatch/config.go @@ -18,8 +18,6 @@ type Config struct { AccessKey string `mapstructure:"access_key,omitempty"` SecretKey string `mapstructure:"secret_key,omitempty"` RoleARN string `mapstructure:"role_arn,omitempty"` - RegionType string `mapstructure:"region_type,omitempty"` - Mode string `mapstructure:"mode,omitempty"` Profile string `mapstructure:"profile,omitempty"` SharedCredentialFilename string `mapstructure:"shared_credential_file,omitempty"` Token string `mapstructure:"token,omitempty"` diff --git a/plugins/outputs/cloudwatch/factory.go b/plugins/outputs/cloudwatch/factory.go index f1a554b703..c97c87cd8a 100644 --- a/plugins/outputs/cloudwatch/factory.go +++ b/plugins/outputs/cloudwatch/factory.go @@ -14,8 +14,11 @@ import ( ) const ( - TypeStr component.Type = "awscloudwatch" - stability = component.StabilityLevelAlpha + stability = component.StabilityLevelAlpha +) + +var ( + TypeStr, _ = component.NewType("awscloudwatch") ) func NewFactory() exporter.Factory { diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 06ec2cc5e2..7cd89fb4b1 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -23,7 +23,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" "github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal" @@ -150,8 +149,8 @@ func (c *CloudWatchLogs) getDest(t Target) *cwDest { Logger: configaws.SDKLogger{}, }, ) - 
provider.GetFlagsStats().SetFlagWithValue(provider.FlagRegionType, c.RegionType) - provider.GetFlagsStats().SetFlagWithValue(provider.FlagMode, c.Mode) + agent.UsageFlags().SetValue(agent.FlagRegionType, c.RegionType) + agent.UsageFlags().SetValue(agent.FlagMode, c.Mode) if containerInsightsRegexp.MatchString(t.Group) { useragent.Get().SetContainerInsightsFlag() } diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 2ac9e46ad5..50c5b4ab71 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -19,9 +19,10 @@ import ( ) const ( - reqSizeLimit = 1024 * 1024 - reqEventsLimit = 10000 - warnOldTimeStamp = 1 * 24 * time.Hour + reqSizeLimit = 1024 * 1024 + reqEventsLimit = 10000 + warnOldTimeStamp = 1 * 24 * time.Hour + warnOldTimeStampLogInterval = 1 * 5 * time.Minute ) var ( @@ -52,6 +53,7 @@ type pusher struct { sequenceToken *string lastValidTime int64 lastUpdateTime time.Time + lastWarnMessage time.Time needSort bool stop <-chan struct{} lastSentTime time.Time @@ -424,9 +426,12 @@ func (p *pusher) convertEvent(e logs.LogEvent) *cloudwatchlogs.InputLogEvent { // not have a timestamp. t = p.lastValidTime if !p.lastUpdateTime.IsZero() { - // Check when timestamp has an interval of 5 days. - if time.Since(p.lastUpdateTime) > warnOldTimeStamp { - p.Log.Warnf("Unable to parse timestamp, using last valid timestamp found in the logs %v: which is at least older than 1 day for log group %v: ", p.lastValidTime, p.Group) + // Check when timestamp has an interval of 1 day.
+ if (time.Since(p.lastUpdateTime) > warnOldTimeStamp) && (time.Since(p.lastWarnMessage) > warnOldTimeStampLogInterval) { + { + p.Log.Warnf("Unable to parse timestamp, using last valid timestamp found in the logs %v: which is at least older than 1 day for log group %v: ", p.lastValidTime, p.Group) + p.lastWarnMessage = time.Now() + } } } } else { @@ -436,6 +441,7 @@ func (p *pusher) convertEvent(e logs.LogEvent) *cloudwatchlogs.InputLogEvent { t = e.Time().UnixNano() / 1000000 p.lastValidTime = t p.lastUpdateTime = time.Now() + p.lastWarnMessage = time.Time{} } return &cloudwatchlogs.InputLogEvent{ Message: &message, diff --git a/plugins/plugins.go b/plugins/plugins.go index 897cbef921..4bf1c8a918 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -10,6 +10,7 @@ import ( // Enabled cloudwatch-agent input plugins _ "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile" + _ "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/nvidia_smi" _ "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" _ "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/statsd" _ "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/win_perf_counters" @@ -28,7 +29,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ "github.com/influxdata/telegraf/plugins/inputs/net" - _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" _ "github.com/influxdata/telegraf/plugins/inputs/processes" _ "github.com/influxdata/telegraf/plugins/inputs/procstat" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" diff --git a/plugins/processors/awsappsignals/README.md b/plugins/processors/awsapplicationsignals/README.md similarity index 92% rename from plugins/processors/awsappsignals/README.md rename to plugins/processors/awsapplicationsignals/README.md index 4906b69a46..36893c2528 100644 --- a/plugins/processors/awsappsignals/README.md +++ 
b/plugins/processors/awsapplicationsignals/README.md @@ -50,36 +50,36 @@ A replacements section defines a matching against the dimensions of incoming met ## AWS AppSignals Processor Configuration Example ```yaml -awsappsignals: +awsapplicationsignals: resolvers: ["eks"] rules: - selectors: - dimension: Operation - match: "POST *" + match: "POST *" - dimension: RemoteService - match: "*" - action: keep - rule_name: "keep01" + match: "*" + action: keep + rule_name: "keep01" - selectors: - dimension: Operation - match: "GET *" + match: "GET *" - dimension: RemoteService - match: "*" - action: keep - rule_name: "keep02" + match: "*" + action: keep + rule_name: "keep02" - selectors: - dimension: Operation - match: "POST *" - action: drop - rule_name: "drop01" + match: "POST *" + action: drop + rule_name: "drop01" - selectors: - dimension: Operation - match: "*" - replacements: - - target_dimension: RemoteOperation - value: "This is a test string" - action: replace - rule_name: "replace01" + match: "*" + replacements: + - target_dimension: RemoteOperation + value: "This is a test string" + action: replace + rule_name: "replace01" ``` ## Amazon CloudWatch Agent Configuration Example diff --git a/plugins/processors/awsapplicationsignals/common/types.go b/plugins/processors/awsapplicationsignals/common/types.go new file mode 100644 index 0000000000..a9e72a2fd3 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/common/types.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package common + +const ( + MetricAttributeLocalService = "Service" + MetricAttributeLocalOperation = "Operation" + MetricAttributeEnvironment = "Environment" + MetricAttributeRemoteService = "RemoteService" + MetricAttributeRemoteEnvironment = "RemoteEnvironment" + MetricAttributeRemoteOperation = "RemoteOperation" + MetricAttributeRemoteResourceIdentifier = "RemoteResourceIdentifier" + MetricAttributeRemoteResourceType = "RemoteResourceType" +) + +const ( + AttributeEKSClusterName = "EKS.Cluster" + AttributeK8SClusterName = "K8s.Cluster" + AttributeK8SNamespace = "K8s.Namespace" + AttributeEC2AutoScalingGroup = "EC2.AutoScalingGroup" + AttributeEC2InstanceId = "EC2.InstanceId" + AttributeHost = "Host" + AttributePlatformType = "PlatformType" + AttributeTelemetrySDK = "Telemetry.SDK" + AttributeTelemetryAgent = "Telemetry.Agent" + AttributeTelemetrySource = "Telemetry.Source" +) + +const ( + AttributeTmpReserved = "aws.tmp.reserved" +) + +var IndexableMetricAttributes = []string{ + MetricAttributeLocalService, + MetricAttributeLocalOperation, + MetricAttributeEnvironment, + MetricAttributeRemoteService, + MetricAttributeRemoteEnvironment, + MetricAttributeRemoteOperation, + MetricAttributeRemoteResourceIdentifier, + MetricAttributeRemoteResourceType, +} diff --git a/plugins/processors/awsapplicationsignals/config/config.go b/plugins/processors/awsapplicationsignals/config/config.go new file mode 100644 index 0000000000..19d5faeddf --- /dev/null +++ b/plugins/processors/awsapplicationsignals/config/config.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package config + +import ( + "context" + "errors" + "time" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/rules" +) + +type Config struct { + Resolvers []Resolver `mapstructure:"resolvers"` + Rules []rules.Rule `mapstructure:"rules"` + Limiter *LimiterConfig `mapstructure:"limiter"` +} + +type LimiterConfig struct { + Threshold int `mapstructure:"drop_threshold"` + Disabled bool `mapstructure:"disabled"` + LogDroppedMetrics bool `mapstructure:"log_dropped_metrics"` + RotationInterval time.Duration `mapstructure:"rotation_interval"` + GarbageCollectionInterval time.Duration `mapstructure:"garbage_collection_interval"` + ParentContext context.Context `mapstructure:"-"` +} + +const ( + DefaultThreshold = 500 + DefaultRotationInterval = 1 * time.Hour + DefaultGCInterval = 10 * time.Minute +) + +func NewDefaultLimiterConfig() *LimiterConfig { + return &LimiterConfig{ + Threshold: DefaultThreshold, + Disabled: false, + LogDroppedMetrics: false, + RotationInterval: DefaultRotationInterval, + GarbageCollectionInterval: DefaultGCInterval, + } +} + +func (lc *LimiterConfig) Validate() { + if lc.GarbageCollectionInterval == 0 { + lc.GarbageCollectionInterval = DefaultGCInterval + } +} + +func (cfg *Config) Validate() error { + if len(cfg.Resolvers) == 0 { + return errors.New("resolvers must not be empty") + } + for _, resolver := range cfg.Resolvers { + switch resolver.Platform { + case PlatformEKS: + if resolver.Name == "" { + return errors.New("name must not be empty for eks resolver") + } + case PlatformK8s: + if resolver.Name == "" { + return errors.New("name must not be empty for k8s resolver") + } + case PlatformEC2, PlatformGeneric: + case PlatformECS: + return errors.New("ecs resolver is not supported") + default: + return errors.New("unknown resolver") + } + } + + if cfg.Limiter != nil { + cfg.Limiter.Validate() + } + return nil +} diff --git 
a/plugins/processors/awsappsignals/config/config_test.go b/plugins/processors/awsapplicationsignals/config/config_test.go similarity index 81% rename from plugins/processors/awsappsignals/config/config_test.go rename to plugins/processors/awsapplicationsignals/config/config_test.go index 51205db674..26a2119350 100644 --- a/plugins/processors/awsappsignals/config/config_test.go +++ b/plugins/processors/awsapplicationsignals/config/config_test.go @@ -21,6 +21,12 @@ func TestValidatePassed(t *testing.T) { Rules: nil, } assert.Nil(t, config.Validate()) + + config = Config{ + Resolvers: []Resolver{NewEC2Resolver("test"), NewGenericResolver("")}, + Rules: nil, + } + assert.Nil(t, config.Validate()) } func TestValidateFailedOnEmptyResolver(t *testing.T) { @@ -31,7 +37,7 @@ func TestValidateFailedOnEmptyResolver(t *testing.T) { assert.NotNil(t, config.Validate()) } -func TestValidateFailedOnEmptyClusterName(t *testing.T) { +func TestValidateFailedOnEmptyResolverName(t *testing.T) { config := Config{ Resolvers: []Resolver{NewEKSResolver("")}, Rules: nil, diff --git a/plugins/processors/awsappsignals/config/resolvers.go b/plugins/processors/awsapplicationsignals/config/resolvers.go similarity index 78% rename from plugins/processors/awsappsignals/config/resolvers.go rename to plugins/processors/awsapplicationsignals/config/resolvers.go index 56f91d2e2c..2ad6e25bf4 100644 --- a/plugins/processors/awsappsignals/config/resolvers.go +++ b/plugins/processors/awsapplicationsignals/config/resolvers.go @@ -10,6 +10,10 @@ const ( PlatformEKS = "eks" // PlatformK8s Native Kubernetes PlatformK8s = "k8s" + // PlatformEC2 Amazon EC2 platform + PlatformEC2 = "ec2" + // PlatformECS Amazon ECS + PlatformECS = "ecs" ) type Resolver struct { @@ -31,6 +35,13 @@ func NewK8sResolver(name string) Resolver { } } +func NewEC2Resolver(name string) Resolver { + return Resolver{ + Name: name, + Platform: PlatformEC2, + } +} + func NewGenericResolver(name string) Resolver { return Resolver{ Name: name, 
diff --git a/plugins/processors/awsappsignals/config/resolvers_test.go b/plugins/processors/awsapplicationsignals/config/resolvers_test.go similarity index 82% rename from plugins/processors/awsappsignals/config/resolvers_test.go rename to plugins/processors/awsapplicationsignals/config/resolvers_test.go index 2f1b09186a..a68ab706f4 100644 --- a/plugins/processors/awsappsignals/config/resolvers_test.go +++ b/plugins/processors/awsapplicationsignals/config/resolvers_test.go @@ -19,6 +19,11 @@ func TestK8sResolver(t *testing.T) { assert.Equal(t, "k8s", resolver.Platform) } +func TestEC2Resolver(t *testing.T) { + resolver := NewEC2Resolver("test") + assert.Equal(t, "ec2", resolver.Platform) +} + func TestNewGenericResolver(t *testing.T) { resolver := NewGenericResolver("") assert.Equal(t, "generic", resolver.Platform) diff --git a/plugins/processors/awsappsignals/factory.go b/plugins/processors/awsapplicationsignals/factory.go similarity index 79% rename from plugins/processors/awsappsignals/factory.go rename to plugins/processors/awsapplicationsignals/factory.go index 83a6c764ed..18b5cb5a54 100644 --- a/plugins/processors/awsappsignals/factory.go +++ b/plugins/processors/awsapplicationsignals/factory.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package awsappsignals +package awsapplicationsignals import ( "context" @@ -12,17 +12,19 @@ import ( "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" - appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" + appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" ) const ( - // The value of "type" key in configuration. - typeStr = "awsappsignals" // The stability level of the processor. 
stability = component.StabilityLevelBeta ) -var consumerCapabilities = consumer.Capabilities{MutatesData: true} +var ( + // The value of "type" key in configuration. + typeStr, _ = component.NewType("awsapplicationsignals") + consumerCapabilities = consumer.Capabilities{MutatesData: true} +) // NewFactory returns a new factory for the aws attributes processor. func NewFactory() processor.Factory { @@ -58,7 +60,7 @@ func createTracesProcessor( next, ap.processTraces, processorhelper.WithCapabilities(consumerCapabilities), - processorhelper.WithStart(ap.Start), + processorhelper.WithStart(ap.StartTraces), processorhelper.WithShutdown(ap.Shutdown)) } @@ -80,19 +82,19 @@ func createMetricsProcessor( nextMetricsConsumer, ap.processMetrics, processorhelper.WithCapabilities(consumerCapabilities), - processorhelper.WithStart(ap.Start), + processorhelper.WithStart(ap.StartMetrics), processorhelper.WithShutdown(ap.Shutdown)) } func createProcessor( params processor.CreateSettings, cfg component.Config, -) (*awsappsignalsprocessor, error) { +) (*awsapplicationsignalsprocessor, error) { pCfg, ok := cfg.(*appsignalsconfig.Config) if !ok { - return nil, errors.New("could not initialize awsappsignalsprocessor") + return nil, errors.New("could not initialize awsapplicationsignalsprocessor") } - ap := &awsappsignalsprocessor{logger: params.Logger, config: pCfg} + ap := &awsapplicationsignalsprocessor{logger: params.Logger, config: pCfg} return ap, nil } diff --git a/plugins/processors/awsappsignals/factory_test.go b/plugins/processors/awsapplicationsignals/factory_test.go similarity index 85% rename from plugins/processors/awsappsignals/factory_test.go rename to plugins/processors/awsapplicationsignals/factory_test.go index 1bc2c4761f..29aa9472d3 100644 --- a/plugins/processors/awsappsignals/factory_test.go +++ b/plugins/processors/awsapplicationsignals/factory_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package awsappsignals +package awsapplicationsignals import ( "path/filepath" @@ -12,8 +12,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap/confmaptest" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/rules" ) var expectedRules = []rules.Rule{ @@ -73,12 +73,12 @@ func TestLoadEKSConfig(t *testing.T) { t.Parallel() tests := []struct { - id component.ID + name string expected component.Config errorMessage string }{ { - id: component.NewIDWithName("awsappsignals", ""), + name: "awsapplicationsignals", expected: &config.Config{ Resolvers: []config.Resolver{config.NewEKSResolver("test")}, Rules: expectedRules, @@ -86,14 +86,16 @@ func TestLoadEKSConfig(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.id.String(), func(t *testing.T) { + t.Run(tt.name, func(t *testing.T) { + newType, _ := component.NewType(tt.name) + id := component.NewIDWithName(newType, "") cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_eks.yaml")) require.NoError(t, err) factory := NewFactory() cfg := factory.CreateDefaultConfig().(*config.Config) - sub, err := cm.Sub(tt.id.String()) + sub, err := cm.Sub(id.String()) require.NoError(t, err) require.NoError(t, component.UnmarshalConfig(sub, cfg)) @@ -114,12 +116,12 @@ func TestLoadGenericConfig(t *testing.T) { t.Parallel() tests := []struct { - id component.ID + name string expected component.Config errorMessage string }{ { - id: component.NewIDWithName("awsappsignals", ""), + name: "awsapplicationsignals", expected: &config.Config{ Resolvers: []config.Resolver{config.NewGenericResolver("")}, Rules: expectedRules, @@ -127,14 +129,16 @@ func 
TestLoadGenericConfig(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.id.String(), func(t *testing.T) { + t.Run(tt.name, func(t *testing.T) { + newType, _ := component.NewType(tt.name) + id := component.NewIDWithName(newType, "") cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_generic.yaml")) require.NoError(t, err) factory := NewFactory() cfg := factory.CreateDefaultConfig().(*config.Config) - sub, err := cm.Sub(tt.id.String()) + sub, err := cm.Sub(id.String()) require.NoError(t, err) require.NoError(t, component.UnmarshalConfig(sub, cfg)) diff --git a/plugins/processors/awsapplicationsignals/internal/attributes/attributes.go b/plugins/processors/awsapplicationsignals/internal/attributes/attributes.go new file mode 100644 index 0000000000..75e9a2fca1 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/attributes/attributes.go @@ -0,0 +1,24 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package attributes + +const ( + // aws attributes + AWSSpanKind = "aws.span.kind" + AWSLocalService = "aws.local.service" + AWSLocalEnvironment = "aws.local.environment" + AWSLocalOperation = "aws.local.operation" + AWSRemoteService = "aws.remote.service" + AWSRemoteOperation = "aws.remote.operation" + AWSRemoteEnvironment = "aws.remote.environment" + AWSRemoteTarget = "aws.remote.target" + AWSRemoteResourceIdentifier = "aws.remote.resource.identifier" + AWSRemoteResourceType = "aws.remote.resource.type" + AWSHostedInEnvironment = "aws.hostedin.environment" + + // resource detection processor attributes + ResourceDetectionHostId = "host.id" + ResourceDetectionHostName = "host.name" + ResourceDetectionASG = "ec2.tag.aws:autoscaling:groupName" +) diff --git a/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch.go b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch.go new file mode 100644 index 
0000000000..99a1eabae5 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch.go @@ -0,0 +1,98 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package cardinalitycontrol + +import ( + "hash/adler32" + "hash/crc32" + "hash/fnv" +) + +type CountMinSketchHashFunc func(hashKey string) int64 + +type CountMinSketchEntry interface { + HashKey() string + Frequency() int +} + +type CountMinSketch struct { + depth int + maxDepth int + width int + matrix [][]int + hashFuncs []CountMinSketchHashFunc +} + +func (cms *CountMinSketch) Insert(obj CountMinSketchEntry) { + for i := 0; i < cms.depth; i++ { + hashFunc := cms.hashFuncs[i] + hashValue := hashFunc(obj.HashKey()) + pos := int(hashValue % int64(cms.width)) + + cms.matrix[i][pos] += obj.Frequency() + } +} + +func NewCountMinSketch(depth, width int, hashFuncs ...CountMinSketchHashFunc) *CountMinSketch { + matrix := make([][]int, depth) + for i := range matrix { + matrix[i] = make([]int, width) + } + cms := &CountMinSketch{ + depth: 0, + maxDepth: depth, + width: width, + matrix: matrix, + } + if hashFuncs != nil { + cms.RegisterHashFunc(hashFuncs...) + } else { + RegisterDefaultHashFuncs(cms) + } + return cms +} + +func RegisterDefaultHashFuncs(cms *CountMinSketch) { + hashFunc1 := func(hashKey string) int64 { + h := fnv.New32a() + h.Write([]byte(hashKey)) + return int64(h.Sum32()) + } + hashFunc2 := func(hashKey string) int64 { + hash := crc32.ChecksumIEEE([]byte(hashKey)) + return int64(hash) + } + hashFunc3 := func(hashKey string) int64 { + hash := adler32.Checksum([]byte(hashKey)) + return int64(hash) + } + cms.RegisterHashFunc(hashFunc1, hashFunc2, hashFunc3) +} + +func (cms *CountMinSketch) RegisterHashFunc(hashFuncs ...CountMinSketchHashFunc) { + if cms.hashFuncs == nil { + cms.hashFuncs = hashFuncs + } else { + cms.hashFuncs = append(cms.hashFuncs, hashFuncs...) 
+ } + if cms.maxDepth < len(cms.hashFuncs) { + cms.depth = cms.maxDepth + } else { + cms.depth = len(cms.hashFuncs) + } +} + +func (cms *CountMinSketch) Get(obj CountMinSketchEntry) int { + minCount := int(^uint(0) >> 1) // Initialize with the maximum possible integer value + for i := 0; i < cms.depth; i++ { + hashFunc := cms.hashFuncs[i] + hashValue := hashFunc(obj.HashKey()) + pos := int(hashValue % int64(cms.width)) + + if cms.matrix[i][pos] < minCount { + minCount = cms.matrix[i][pos] + } + } + return minCount +} diff --git a/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch_test.go b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch_test.go new file mode 100644 index 0000000000..19cd7bcc21 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/count_min_sketch_test.go @@ -0,0 +1,94 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package cardinalitycontrol + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +var metricNames = []string{"latency", "error", "fault"} + +func TestUpdateFrequency(t *testing.T) { + cms := NewCountMinSketch(3, 10) + for i := 0; i < 10; i++ { + md := MetricData{ + hashKey: "xxx", + name: "latency", + service: "app1", + frequency: 1, + } + cms.Insert(md) + val := cms.Get(md) + assert.Equal(t, 1+i, val) + } +} + +var testCases = []int{50, 100, 200, 500, 1000, 2000} + +func TestWriteMultipleEntries(t *testing.T) { + cms := NewCountMinSketch(3, 5000) + + maxCollisionRate := 0 + for _, dataCount := range testCases { + metricDataArray := make([]*MetricData, dataCount) + for i := 0; i < dataCount; i++ { + labels := map[string]string{ + "operation": "/api/customers/" + strconv.Itoa(rand.Int()), + } + for _, metricName := range metricNames { + freq := rand.Intn(5000) + md := MetricData{ + hashKey: sortAndConcatLabels(labels), + name: 
metricName, + service: "app", + frequency: freq, + } + cms.Insert(md) + if metricDataArray[i] == nil { + metricDataArray[i] = &md + } else { + metricDataArray[i].frequency = metricDataArray[i].frequency + freq + } + + } + } + + err := 0 + for _, data := range metricDataArray { + val := cms.Get(data) + if data.frequency != val { + err += 1 + } + } + collisionRate := err * 100 / len(metricDataArray) + if maxCollisionRate < collisionRate { + maxCollisionRate = collisionRate + } + t.Logf("When the item count is %d with even distribution, the collision rate is %d.\n", dataCount, collisionRate) + } + + // revisit the count min sketch setting if the assertion fails. + assert.True(t, maxCollisionRate < 30) +} + +func TestAdjustUnsupportedDepth(t *testing.T) { + cms := NewCountMinSketch(5, 10) + assert.Equal(t, 3, cms.depth) + for i := 0; i < 2; i++ { + cms.RegisterHashFunc(func(hashKey string) int64 { + return int64(0) + }) + } + assert.Equal(t, 5, cms.depth) + for i := 0; i < 2; i++ { + cms.RegisterHashFunc(func(hashKey string) int64 { + return int64(0) + }) + } + assert.Equal(t, 5, cms.depth) +} diff --git a/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter.go b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter.go new file mode 100644 index 0000000000..3b2efda288 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter.go @@ -0,0 +1,418 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package cardinalitycontrol + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" +) + +const ( + UnprocessedServiceOperationValue = "AllOtherOperations" + UnprocessedRemoteServiceOperationValue = "AllOtherRemoteOperations" +) + +const ( + defaultCMSDepth = 3 + defaultCMSWidth = 5000 +) + +var awsDeclaredMetricAttributes = []string{ + common.AttributeEKSClusterName, + common.AttributeK8SNamespace, + common.MetricAttributeEnvironment, + common.MetricAttributeLocalService, + common.MetricAttributeLocalOperation, + common.MetricAttributeRemoteService, + common.MetricAttributeRemoteOperation, + common.MetricAttributeRemoteResourceIdentifier, + common.MetricAttributeRemoteEnvironment, + common.AttributeK8SClusterName, +} + +type Limiter interface { + Admit(name string, attributes, resourceAttributes pcommon.Map) (bool, error) +} + +type MetricsLimiter struct { + DropThreshold int + LogDroppedMetrics bool + RotationInterval time.Duration + + logger *zap.Logger + ctx context.Context + mapLock sync.RWMutex + services map[string]*service +} + +func NewMetricsLimiter(config *config.LimiterConfig, logger *zap.Logger) Limiter { + logger.Info("creating metrics limiter with config", zap.Any("config", config)) + + ctx := config.ParentContext + if ctx == nil { + ctx = context.TODO() + } + + limiter := &MetricsLimiter{ + DropThreshold: config.Threshold, + LogDroppedMetrics: config.LogDroppedMetrics, + RotationInterval: config.RotationInterval, + + logger: logger, + ctx: ctx, + services: map[string]*service{}, + } + + go func() { + for { + select { + case <-ctx.Done(): + return + default: + limiter.removeStaleServices() + time.Sleep(config.GarbageCollectionInterval) + } + } + }() + + 
logger.Info("metrics limiter created.") + + return limiter +} + +func (m *MetricsLimiter) Admit(metricName string, attributes, resourceAttributes pcommon.Map) (bool, error) { + labels, serviceName, found := m.filterAWSDeclaredAttributes(attributes, resourceAttributes) + if !found { + return true, nil + } + admitted := true + + m.mapLock.RLock() + svc := m.services[serviceName] + m.mapLock.RUnlock() + if svc == nil { + m.mapLock.Lock() + svc = m.services[serviceName] + if svc == nil { + svc = newService(serviceName, m.DropThreshold, m.RotationInterval, m.ctx, m.logger) + m.services[serviceName] = svc + } + m.mapLock.Unlock() + } + + metricData := newMetricData(serviceName, metricName, labels) + + reserved, _ := attributes.Get(common.AttributeTmpReserved) + if reserved.Bool() { + attributes.Remove(common.AttributeTmpReserved) + return true, nil + } + + if !svc.admitMetricData(metricData) { + svc.rollupMetricData(attributes) + + svc.totalRollup++ + admitted = false + + if m.LogDroppedMetrics { + m.logger.Debug(fmt.Sprintf("[%s] drop metric data", svc.name), zap.Any("labels", labels)) + } + } + + svc.totalMetricSent++ + + svc.rwLock.RLock() + defer svc.rwLock.RUnlock() + + svc.totalCount++ + svc.InsertMetricDataToPrimary(metricData) + svc.InsertMetricDataToSecondary(metricData) + return admitted, nil +} + +func (m *MetricsLimiter) filterAWSDeclaredAttributes(attributes, resourceAttributes pcommon.Map) (map[string]string, string, bool) { + svcNameAttr, exists := attributes.Get(common.MetricAttributeLocalService) + if !exists { + return nil, "", false + } + labels := map[string]string{} + svcName := svcNameAttr.AsString() + for _, attrKey := range awsDeclaredMetricAttributes { + if attr, ok := attributes.Get(attrKey); ok { + labels[attrKey] = attr.AsString() + } + } + return labels, svcName, true +} + +func (m *MetricsLimiter) removeStaleServices() { + var svcToRemove []string + for name, svc := range m.services { + if svc.rotations > 3 { + if svc.countSnapshot[0] == 
svc.countSnapshot[1] && svc.countSnapshot[1] == svc.countSnapshot[2] { + svc.cancelFunc() + svcToRemove = append(svcToRemove, name) + } + } + } + + m.mapLock.Lock() + defer m.mapLock.Unlock() + + for _, name := range svcToRemove { + m.logger.Info("remove stale service " + name + ".") + delete(m.services, name) + } +} + +type service struct { + logger *zap.Logger + name string + cancelFunc context.CancelFunc + + rwLock sync.RWMutex + primaryCMS *CountMinSketch + primaryTopK *topKMetrics + secondaryCMS *CountMinSketch + secondaryTopK *topKMetrics + + totalCount int + rotations int + countSnapshot []int + + totalRollup int + totalMetricSent int +} + +func (s *service) InsertMetricDataToPrimary(md *MetricData) { + s.primaryCMS.Insert(md) + updatedFrequency := s.primaryCMS.Get(md) + updatedMd := copyMetricDataWithUpdatedFrequency(md, updatedFrequency) + s.primaryTopK.Push(md, updatedMd) +} + +func (s *service) InsertMetricDataToSecondary(md *MetricData) { + if s.secondaryCMS != nil { + s.secondaryCMS.Insert(md) + updatedFrequency := s.secondaryCMS.Get(md) + updatedMd := copyMetricDataWithUpdatedFrequency(md, updatedFrequency) + s.secondaryTopK.Push(md, updatedMd) + } +} + +// MetricData represents a key-value pair. 
+type MetricData struct { + hashKey string + name string + service string + frequency int +} + +func (m MetricData) HashKey() string { + return m.hashKey +} + +func (m MetricData) Frequency() int { + return m.frequency +} + +func newMetricData(serviceName, metricName string, labels map[string]string) *MetricData { + hashID := sortAndConcatLabels(labels) + return &MetricData{ + hashKey: hashID, + name: metricName, + service: serviceName, + frequency: 1, + } +} + +func copyMetricDataWithUpdatedFrequency(md *MetricData, frequency int) *MetricData { + return &MetricData{ + hashKey: md.hashKey, + name: md.name, + service: md.service, + frequency: frequency, + } +} + +func sortAndConcatLabels(labels map[string]string) string { + keys := make([]string, 0, len(labels)) + for key := range labels { + keys = append(keys, key) + } + sort.Strings(keys) + + var concatenatedLabels string + for _, key := range keys { + concatenatedLabels += labels[key] + } + keys = nil + return concatenatedLabels +} + +// topKMetrics represents the priority queue with a map for key lookup and a size limit. +type topKMetrics struct { + metricMap map[string]*MetricData + minMetric *MetricData + sizeLimit int +} + +// newTopKMetrics creates a new topKMetrics with a specified size limit. +func newTopKMetrics(sizeLimit int) *topKMetrics { + return &topKMetrics{ + metricMap: make(map[string]*MetricData), + minMetric: nil, + sizeLimit: sizeLimit, + } +} + +// Push adds a key-value pair to the priority queue. If the value already exists, it updates the frequency. +func (t *topKMetrics) Push(oldMetric, newMetric *MetricData) { + hashValue := oldMetric.hashKey + if t.minMetric == nil { + t.minMetric = oldMetric + } + + _, found := t.metricMap[hashValue] + if found { + // Update the frequency. 
+ t.metricMap[hashValue].frequency = newMetric.frequency + // Check if this oldMetric is the new minimum, find the new minMetric after the updates + if t.minMetric.hashKey == hashValue { + // Find the new minMetrics after update the frequency + t.minMetric = t.findMinMetric() + } + return + } + + // If exceeded size limit, delete the smallest + if len(t.metricMap) >= t.sizeLimit { + if newMetric.frequency > t.minMetric.frequency { + delete(t.metricMap, t.minMetric.hashKey) + t.metricMap[hashValue] = newMetric + t.minMetric = t.findMinMetric() + } + } else { + // Check if this newMetric is the new minimum. + if newMetric.frequency < t.minMetric.frequency { + t.minMetric = newMetric + } + t.metricMap[hashValue] = newMetric + } +} + +// findMinMetric removes and returns the key-value pair with the minimum value. +func (t *topKMetrics) findMinMetric() *MetricData { + // Find the new minimum metric and smallest frequency. + var newMinMetric *MetricData + smallestFrequency := int(^uint(0) >> 1) // Initialize with the maximum possible integer value + + for _, metric := range t.metricMap { + if metric.frequency < smallestFrequency { + smallestFrequency = metric.frequency + newMinMetric = metric + } + } + return newMinMetric +} + +func (s *service) admitMetricData(metric *MetricData) bool { + _, found := s.primaryTopK.metricMap[metric.hashKey] + if len(s.primaryTopK.metricMap) < s.primaryTopK.sizeLimit || found { + return true + } + return false +} + +func (s *service) rollupMetricData(attributes pcommon.Map) { + for _, indexAttr := range awsDeclaredMetricAttributes { + if (indexAttr == common.MetricAttributeEnvironment) || (indexAttr == common.MetricAttributeLocalService) || (indexAttr == common.MetricAttributeRemoteService) { + continue + } + if indexAttr == common.MetricAttributeLocalOperation { + attributes.PutStr(indexAttr, UnprocessedServiceOperationValue) + } else if indexAttr == common.MetricAttributeRemoteOperation { + attributes.PutStr(indexAttr, 
UnprocessedRemoteServiceOperationValue)
+		} else {
+			attributes.PutStr(indexAttr, "-")
+		}
+	}
+}
+
+// newService creates a service entry; the count-min sketch is sized by rule of thumb (depth around 4-6 times the logarithm of the expected number of distinct items, width bounded by memory constraints) — the optimal size depends on the application.
+func newService(name string, limit int, rotationInterval time.Duration, parentCtx context.Context, logger *zap.Logger) *service {
+	depth := defaultCMSDepth
+	width := defaultCMSWidth
+
+	ctx, cancel := context.WithCancel(parentCtx)
+	svc := &service{
+		logger:        logger,
+		name:          name,
+		cancelFunc:    cancel,
+		primaryCMS:    NewCountMinSketch(depth, width),
+		primaryTopK:   newTopKMetrics(limit),
+		countSnapshot: make([]int, 3),
+	}
+
+	// Create a ticker to rotate the visit records at the configured rotationInterval
+	rotationTicker := time.NewTicker(rotationInterval)
+	// NOTE(review): rotationTicker is never stopped; consider calling Stop() when ctx is done to release the ticker
+
+	// Create a goroutine to handle rotationTicker.C
+	go func() {
+		for {
+			select {
+			case <-rotationTicker.C:
+				svc.logger.Info(fmt.Sprintf("[%s] rotating visit records, current rotation %d", name, svc.rotations))
+				if err := rotateVisitRecords(svc); err != nil {
+					svc.logger.Error(fmt.Sprintf("[%s] failed to rotate visit records.", name), zap.Error(err))
+				}
+			case <-ctx.Done():
+				return
+			default:
+				// NOTE(review): default + Sleep turns this select into a 1s polling loop; it would block fine without it
+				time.Sleep(1 * time.Second)
+			}
+		}
+	}()
+
+	svc.logger.Info(fmt.Sprintf("[%s] service entry is created.\n", name))
+	return svc
+}
+
+func rotateVisitRecords(svc *service) error {
+	svc.rwLock.Lock()
+	defer svc.rwLock.Unlock()
+
+	cmsDepth := svc.primaryCMS.depth
+	cmsWidth := svc.primaryCMS.width
+	topKLimit := svc.primaryTopK.sizeLimit
+
+	nextPrimaryCMS := svc.secondaryCMS
+	nextPrimaryTopK := svc.secondaryTopK
+
+	svc.secondaryCMS = NewCountMinSketch(cmsDepth, cmsWidth)
+	svc.secondaryTopK = newTopKMetrics(topKLimit)
+
+	if nextPrimaryCMS 
!= nil && nextPrimaryTopK != nil { + svc.primaryCMS = nextPrimaryCMS + svc.primaryTopK = nextPrimaryTopK + } else { + svc.logger.Info(fmt.Sprintf("[%s] secondary visit records are nil.", svc.name)) + } + + svc.countSnapshot[svc.rotations%3] = svc.totalCount + svc.rotations++ + + return nil +} diff --git a/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter_test.go b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter_test.go new file mode 100644 index 0000000000..e7b14e4978 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol/metrics_limiter_test.go @@ -0,0 +1,272 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package cardinalitycontrol + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + awsapplicationsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" +) + +var emptyResourceAttributes = pcommon.NewMap() +var logger, _ = zap.NewDevelopment() + +func TestAdmitAndRollup(t *testing.T) { + config := &awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 2, + Disabled: false, + LogDroppedMetrics: false, + RotationInterval: awsapplicationsignalsconfig.DefaultRotationInterval, + } + config.Validate() + + limiter := NewMetricsLimiter(config, logger) + + admittedAttributes := map[string]pcommon.Map{} + for i := 0; i < 10; i++ { + attr := newLowCardinalityAttributes(100) + if ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes); ok { + uniqKey, _ := attr.Get("RemoteOperation") + admittedAttributes[uniqKey.AsString()] = attr + } else { + for _, indexedAttrKey := range awsDeclaredMetricAttributes { + if indexedAttrKey == 
common.MetricAttributeEnvironment || + indexedAttrKey == common.MetricAttributeLocalService || + indexedAttrKey == common.MetricAttributeRemoteService { + continue + } + attrValue, _ := attr.Get(indexedAttrKey) + if indexedAttrKey == common.MetricAttributeLocalOperation { + assert.Equal(t, UnprocessedServiceOperationValue, attrValue.AsString()) + } else if indexedAttrKey == common.MetricAttributeRemoteOperation { + assert.Equal(t, UnprocessedRemoteServiceOperationValue, attrValue.AsString()) + } else { + assert.Equal(t, "-", attrValue.AsString()) + } + } + } + } + assert.Equal(t, 2, len(admittedAttributes), fmt.Sprintf("admitted attributes are %v", admittedAttributes)) +} + +func TestAdmitByTopK(t *testing.T) { + config := awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 100, + Disabled: false, + LogDroppedMetrics: false, + RotationInterval: awsapplicationsignalsconfig.DefaultRotationInterval, + } + config.Validate() + + limiter := NewMetricsLimiter(&config, logger) + + // fulfill topk with high cardinality attributes + for i := 0; i < 110; i++ { + attr := newHighCardinalityAttributes() + limiter.Admit("latency", attr, emptyResourceAttributes) + } + + // sending low cardinality attributes + for i := 0; i < 100; i++ { + attr := newFixedAttributes(i % 20) + limiter.Admit("latency", attr, emptyResourceAttributes) + } + + for i := 0; i < 20; i++ { + attr := newFixedAttributes(i) + ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes) + assert.True(t, ok) + } +} + +func TestAdmitLowCardinalityAttributes(t *testing.T) { + config := awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 10, + Disabled: false, + LogDroppedMetrics: false, + RotationInterval: awsapplicationsignalsconfig.DefaultRotationInterval, + } + config.Validate() + + limiter := NewMetricsLimiter(&config, logger) + + rejectCount := 0 + for i := 0; i < 100; i++ { + if ok, _ := limiter.Admit("latency", newLowCardinalityAttributes(10), emptyResourceAttributes); !ok { + rejectCount += 1 + 
} + } + assert.Equal(t, 0, rejectCount) +} + +func TestAdmitReservedMetrics(t *testing.T) { + config := awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 10, + Disabled: false, + LogDroppedMetrics: false, + RotationInterval: awsapplicationsignalsconfig.DefaultRotationInterval, + } + config.Validate() + + limiter := NewMetricsLimiter(&config, logger) + + // fulfill topk with high cardinality attributes + for i := 0; i < 20; i++ { + attr := newHighCardinalityAttributes() + limiter.Admit("latency", attr, emptyResourceAttributes) + } + + for i := 0; i < 20; i++ { + attr := newHighCardinalityAttributes() + // simulate attributes touched by customization rules + attr.PutBool(common.AttributeTmpReserved, true) + + ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes) + assert.True(t, ok) + _, exists := attr.Get(common.AttributeTmpReserved) + assert.False(t, exists) + } +} + +func TestClearStaleService(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + + config := awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 10, + Disabled: false, + LogDroppedMetrics: false, + + ParentContext: ctx, + RotationInterval: time.Second, + GarbageCollectionInterval: time.Second, + } + limiter := NewMetricsLimiter(&config, logger) + + for i := 0; i < 10; i++ { + appName := "app" + strconv.Itoa(i) + attr := pcommon.NewMap() + attr.PutStr("Service", appName) + limiter.Admit(appName, attr, emptyResourceAttributes) + } + + time.Sleep(10 * time.Second) + cancel() + + metricsLimiter := limiter.(*MetricsLimiter) + assert.Equal(t, 0, len(metricsLimiter.services)) +} + +func TestInheritanceAfterRotation(t *testing.T) { + config := awsapplicationsignalsconfig.LimiterConfig{ + Threshold: 10, + Disabled: false, + LogDroppedMetrics: true, + RotationInterval: 5 * time.Second, + } + config.Validate() + + limiter := NewMetricsLimiter(&config, logger) + + // fulfill primary with 0-10 + for i := 0; i < 10; i++ { + attr := newFixedAttributes(i) + ok, _ := 
limiter.Admit("latency", attr, emptyResourceAttributes)
+		assert.True(t, ok)
+	}
+
+	// wait for rotation
+	time.Sleep(6 * time.Second)
+	// validate 0-10 are admitted
+	for i := 0; i < 10; i++ {
+		attr := newFixedAttributes(i)
+		ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes)
+		assert.True(t, ok)
+	}
+
+	// validate 10-20 are rejected
+	// promote 10-20 to top k
+	for j := 0; j < 2; j++ {
+		for i := 10; i < 20; i++ {
+			attr := newFixedAttributes(i)
+			ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes)
+			assert.False(t, ok)
+		}
+	}
+
+	// wait for rotation
+	time.Sleep(6 * time.Second)
+
+	// validate 10-20 are admitted
+	for i := 10; i < 20; i++ {
+		attr := newFixedAttributes(i)
+		ok, _ := limiter.Admit("latency", attr, emptyResourceAttributes)
+		assert.True(t, ok)
+	}
+}
+
+func TestRotationInterval(t *testing.T) {
+	svc := newService("test", 1, 5*time.Second, context.Background(), logger)
+	// wait for secondary to be created
+	time.Sleep(7 * time.Second)
+	for i := 0; i < 5; i++ {
+		svc.secondaryCMS.matrix[0][0] = 1
+
+		// wait for rotation
+		time.Sleep(5 * time.Second)
+
+		// verify secondary is promoted to primary
+		assert.Equal(t, 0, svc.secondaryCMS.matrix[0][0])
+		assert.Equal(t, 1, svc.primaryCMS.matrix[0][0])
+	}
+}
+
+func newRandomIP() string {
+	rand.NewSource(time.Now().UnixNano())
+
+	ipPart1 := rand.Intn(256)
+	ipPart2 := rand.Intn(256)
+	ipPart3 := rand.Intn(256)
+	ipPart4 := rand.Intn(256)
+
+	return fmt.Sprintf("%d.%d.%d.%d", ipPart1, ipPart2, ipPart3, ipPart4)
+}
+
+func newFixedAttributes(val int) pcommon.Map {
+	methodName := "/test" + strconv.Itoa(val)
+	attr := pcommon.NewMap()
+	attr.PutStr("Service", "app")
+	attr.PutStr("Operation", "/api/gateway"+methodName)
+	attr.PutStr("RemoteService", "upstream1")
+	attr.PutStr("RemoteOperation", methodName)
+	return attr
+}
+
+func newLowCardinalityAttributes(admitRange int) pcommon.Map {
+	methodName := "/test" + strconv.Itoa(rand.Intn(admitRange))
+	attr := 
pcommon.NewMap() + attr.PutStr("Service", "app") + attr.PutStr("Operation", "/api/gateway"+methodName) + attr.PutStr("RemoteService", "upstream1") + attr.PutStr("RemoteOperation", methodName) + return attr +} + +func newHighCardinalityAttributes() pcommon.Map { + attr := pcommon.NewMap() + attr.PutStr("Service", "app") + attr.PutStr("Operation", "/api/gateway/test") + attr.PutStr("RemoteService", newRandomIP()) + attr.PutStr("RemoteOperation", "/test/"+strconv.Itoa(rand.Int())) + return attr +} diff --git a/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go new file mode 100644 index 0000000000..b0966bca2c --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go @@ -0,0 +1,234 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package normalizer + +import ( + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + deprecatedsemconv "go.opentelemetry.io/collector/semconv/v1.18.0" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/internal/version" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" +) + +const ( + // Length limits from Application Signals SLOs + maxEnvironmentLength = 259 + maxServiceNameLength = 255 + + // Length limits from CloudWatch Metrics + defaultMetricAttributeLength = 1024 +) + +type attributesNormalizer struct { + logger *zap.Logger +} + +var attributesRenamingForMetric = map[string]string{ + attr.AWSLocalService: common.MetricAttributeLocalService, + attr.AWSLocalOperation: common.MetricAttributeLocalOperation, + attr.AWSLocalEnvironment: common.MetricAttributeEnvironment, + 
attr.AWSRemoteService:           common.MetricAttributeRemoteService,
+	attr.AWSRemoteOperation:         common.MetricAttributeRemoteOperation,
+	attr.AWSRemoteEnvironment:       common.MetricAttributeRemoteEnvironment,
+	attr.AWSRemoteTarget:            common.MetricAttributeRemoteResourceIdentifier,
+	attr.AWSRemoteResourceIdentifier: common.MetricAttributeRemoteResourceIdentifier,
+	attr.AWSRemoteResourceType:      common.MetricAttributeRemoteResourceType,
+}
+
+var resourceAttributesRenamingForTrace = map[string]string{
+	// these kubernetes resource attributes are set by the openTelemetry operator
+	// see the code references from upstream:
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43
+	semconv.AttributeK8SDeploymentName:  "K8s.Workload",
+	semconv.AttributeK8SStatefulSetName: "K8s.Workload",
+	semconv.AttributeK8SDaemonSetName:   "K8s.Workload",
+	semconv.AttributeK8SJobName:         "K8s.Workload",
+	semconv.AttributeK8SCronJobName:     "K8s.Workload",
+	semconv.AttributeK8SPodName:         "K8s.Pod",
+}
+
+var attributesRenamingForTrace = map[string]string{
+	attr.AWSRemoteTarget: attr.AWSRemoteResourceIdentifier,
+}
+
+var copyMapForMetric = map[string]string{
+	// these kubernetes resource attributes are set by the openTelemetry operator
+	// see the code references from upstream:
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43
+	semconv.AttributeK8SDeploymentName:  "K8s.Workload",
+	semconv.AttributeK8SStatefulSetName: "K8s.Workload",
+	semconv.AttributeK8SDaemonSetName:   "K8s.Workload",
+	semconv.AttributeK8SJobName:         
"K8s.Workload",
+	semconv.AttributeK8SCronJobName:     "K8s.Workload",
+	semconv.AttributeK8SPodName:         "K8s.Pod",
+}
+
+const (
+	instrumentationModeAuto   = "Auto"
+	instrumentationModeManual = "Manual"
+)
+
+func NewAttributesNormalizer(logger *zap.Logger) *attributesNormalizer {
+	return &attributesNormalizer{
+		logger: logger,
+	}
+}
+
+func (n *attributesNormalizer) Process(attributes, resourceAttributes pcommon.Map, isTrace bool) error {
+	n.copyResourceAttributesToAttributes(attributes, resourceAttributes, isTrace)
+	truncateAttributesByLength(attributes)
+	n.renameAttributes(attributes, resourceAttributes, isTrace)
+	n.normalizeTelemetryAttributes(attributes, resourceAttributes, isTrace)
+	return nil
+}
+
+func (n *attributesNormalizer) renameAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) {
+	if isTrace {
+		rename(resourceAttributes, resourceAttributesRenamingForTrace)
+		rename(attributes, attributesRenamingForTrace)
+	} else {
+		rename(attributes, attributesRenamingForMetric)
+	}
+}
+
+func (n *attributesNormalizer) copyResourceAttributesToAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) {
+	if isTrace {
+		return
+	}
+	for k, v := range copyMapForMetric {
+		if resourceAttrValue, ok := resourceAttributes.Get(k); ok {
+			// print some debug info when an attribute value is overwritten
+			if originalAttrValue, ok := attributes.Get(k); ok {
+				n.logger.Debug("attribute value is overwritten", zap.String("attribute", k), zap.String("original", originalAttrValue.AsString()), zap.String("new", resourceAttrValue.AsString()))
+			}
+			attributes.PutStr(v, resourceAttrValue.AsString())
+			if k == semconv.AttributeK8SPodName {
+				// only copy "host.id" from resource attributes to "K8s.Node" in attributes if the pod name is set
+				if host, ok := resourceAttributes.Get("host.id"); ok {
+					attributes.PutStr("K8s.Node", host.AsString())
+				}
+			}
+		}
+	}
+}
+
+func (n *attributesNormalizer) normalizeTelemetryAttributes(attributes, resourceAttributes 
pcommon.Map, isTrace bool) { + if isTrace { + return + } + + var ( + sdkName string + sdkVersion string + sdkLang string + ) + var ( + sdkAutoName string + sdkAutoVersion string + ) + sdkName, sdkVersion, sdkLang = "-", "-", "-" + mode := instrumentationModeManual + + resourceAttributes.Range(func(k string, v pcommon.Value) bool { + switch k { + case semconv.AttributeTelemetrySDKName: + sdkName = removeWhitespaces(v.Str()) + case semconv.AttributeTelemetrySDKLanguage: + sdkLang = removeWhitespaces(v.Str()) + case semconv.AttributeTelemetrySDKVersion: + sdkVersion = removeWhitespaces(v.Str()) + } + switch k { + case semconv.AttributeTelemetryDistroName: + sdkAutoName = removeWhitespaces(v.Str()) + case deprecatedsemconv.AttributeTelemetryAutoVersion, semconv.AttributeTelemetryDistroVersion: + sdkAutoVersion = removeWhitespaces(v.Str()) + } + return true + }) + if sdkAutoName != "" { + sdkName = sdkAutoName + mode = instrumentationModeAuto + } + if sdkAutoVersion != "" { + sdkVersion = sdkAutoVersion + mode = instrumentationModeAuto + } + attributes.PutStr(common.AttributeTelemetrySDK, fmt.Sprintf("%s,%s,%s,%s", sdkName, sdkVersion, sdkLang, mode)) + attributes.PutStr(common.AttributeTelemetryAgent, fmt.Sprintf("CWAgent/%s", version.Number())) + + var telemetrySource string + if val, ok := attributes.Get(attr.AWSSpanKind); ok { + switch val.Str() { + case "CLIENT": + telemetrySource = "ClientSpan" + case "SERVER": + telemetrySource = "ServerSpan" + case "PRODUCER": + telemetrySource = "ProducerSpan" + case "CONSUMER": + telemetrySource = "ConsumerSpan" + case "LOCAL_ROOT": + telemetrySource = "LocalRootSpan" + } + attributes.PutStr(common.AttributeTelemetrySource, telemetrySource) + attributes.Remove(attr.AWSSpanKind) + } +} + +func rename(attrs pcommon.Map, renameMap map[string]string) { + for original, replacement := range renameMap { + if value, ok := attrs.Get(original); ok { + attrs.PutStr(replacement, value.AsString()) + attrs.Remove(original) + if original == 
semconv.AttributeK8SPodName { + // only rename host.id if the pod name is set + if host, ok := attrs.Get("host.id"); ok { + attrs.PutStr("K8s.Node", host.AsString()) + } + } + } + } +} + +func truncateAttributesByLength(attributes pcommon.Map) { + // It's assumed that all attributes are initially inserted as trace attribute, and attributesRenamingForMetric + // contains all attributes that will be used for CloudWatch metric dimension. Therefore, we iterate the keys + // for enforcing the limits on length. + for attrKey := range attributesRenamingForMetric { + switch attrKey { + case attr.AWSLocalEnvironment, attr.AWSRemoteEnvironment: + if val, ok := attributes.Get(attrKey); ok { + attributes.PutStr(attrKey, truncateStringByLength(val.Str(), maxEnvironmentLength)) + } + case attr.AWSLocalService, attr.AWSRemoteService: + if val, ok := attributes.Get(attrKey); ok { + attributes.PutStr(attrKey, truncateStringByLength(val.Str(), maxServiceNameLength)) + } + default: + if val, ok := attributes.Get(attrKey); ok { + attributes.PutStr(attrKey, truncateStringByLength(val.Str(), defaultMetricAttributeLength)) + } + } + } +} + +func truncateStringByLength(val string, length int) string { + if len(val) > length { + return val[:length] + } + return val +} + +func removeWhitespaces(val string) string { + return strings.ReplaceAll(val, " ", "") +} diff --git a/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer_test.go b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer_test.go new file mode 100644 index 0000000000..812b5a4142 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer_test.go @@ -0,0 +1,206 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package normalizer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + deprecatedsemconv "go.opentelemetry.io/collector/semconv/v1.18.0" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "go.uber.org/zap" + + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" +) + +func TestRenameAttributes_for_metric(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // test for metric + // Create a pcommon.Map with some attributes + attributes := pcommon.NewMap() + for originalKey, replacementKey := range attributesRenamingForMetric { + attributes.PutStr(originalKey, replacementKey+"-value") + } + + resourceAttributes := pcommon.NewMap() + // Call the process method + normalizer.renameAttributes(attributes, resourceAttributes, false) + + // Check that the original key has been removed + for originalKey := range attributesRenamingForMetric { + if _, ok := attributes.Get(originalKey); ok { + t.Errorf("originalKey was not removed") + } + } + + // Check that the new key has the correct value + for _, replacementKey := range attributesRenamingForMetric { + if value, ok := attributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") + } + } +} + +func TestRenameAttributes_for_trace(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // test for trace + // Create a pcommon.Map with some attributes + resourceAttributes := pcommon.NewMap() + for originalKey, replacementKey := range resourceAttributesRenamingForTrace { + resourceAttributes.PutStr(originalKey, replacementKey+"-value") + } + resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") + + attributes := pcommon.NewMap() + // Call 
the process method + normalizer.renameAttributes(attributes, resourceAttributes, true) + + // Check that the original key has been removed + for originalKey := range resourceAttributesRenamingForTrace { + if _, ok := resourceAttributes.Get(originalKey); ok { + t.Errorf("originalKey was not removed") + } + } + + // Check that the new key has the correct value + for _, replacementKey := range resourceAttributesRenamingForTrace { + if value, ok := resourceAttributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") + } + } + + if value, ok := resourceAttributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), "i-01ef7d37f42caa168") + } +} + +func TestCopyResourceAttributesToAttributes(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // Create a pcommon.Map for resourceAttributes with some attributes + resourceAttributes := pcommon.NewMap() + for resourceAttrKey, attrKey := range copyMapForMetric { + resourceAttributes.PutStr(resourceAttrKey, attrKey+"-value") + } + resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") + + // Create a pcommon.Map for attributes + attributes := pcommon.NewMap() + + // Call the process method + normalizer.copyResourceAttributesToAttributes(attributes, resourceAttributes, false) + + // Check that the attribute has been copied correctly + for _, attrKey := range copyMapForMetric { + if value, ok := attributes.Get(attrKey); !ok || value.AsString() != attrKey+"-value" { + t.Errorf("Attribute was not copied correctly: got %v, want %v", value.AsString(), attrKey+"-value") + } + } + + if value, ok := attributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { + t.Errorf("Attribute was not copied correctly: got %v, want %v", 
value.AsString(), "i-01ef7d37f42caa168") + } +} + +func TestTruncateAttributes(t *testing.T) { + attributes := pcommon.NewMap() + + longValue := make([]byte, 300) + for i := 0; i < 300; i++ { + longValue[i] = 'a' + } + longStringValue := string(longValue) + for key, _ := range attributesRenamingForMetric { + attributes.PutStr(key, longStringValue) + } + + truncateAttributesByLength(attributes) + + val, _ := attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, len(val.Str()) == maxEnvironmentLength) + val, _ = attributes.Get(attr.AWSRemoteEnvironment) + assert.True(t, len(val.Str()) == maxEnvironmentLength) + val, _ = attributes.Get(attr.AWSLocalService) + assert.True(t, len(val.Str()) == maxServiceNameLength) + val, _ = attributes.Get(attr.AWSRemoteService) + assert.True(t, len(val.Str()) == maxServiceNameLength) + val, _ = attributes.Get(attr.AWSRemoteResourceIdentifier) + assert.True(t, len(val.Str()) == 300) +} + +func Test_attributesNormalizer_appendNewAttributes(t *testing.T) { + logger, _ := zap.NewDevelopment() + + completeResourceAttributes := pcommon.NewMap() + completeResourceAttributes.PutStr(semconv.AttributeTelemetrySDKName, "opentelemetry") + completeResourceAttributes.PutStr(deprecatedsemconv.AttributeTelemetryAutoVersion, "0.0.1 auto") + completeResourceAttributes.PutStr(semconv.AttributeTelemetrySDKVersion, "0.0.1 test") + completeResourceAttributes.PutStr(semconv.AttributeTelemetrySDKLanguage, "go") + + incompleteResourceAttributes := pcommon.NewMap() + incompleteResourceAttributes.PutStr(semconv.AttributeTelemetrySDKName, "opentelemetry") + incompleteResourceAttributes.PutStr(semconv.AttributeTelemetrySDKVersion, "0.0.1 test") + + tests := []struct { + name string + attributes pcommon.Map + resourceAttributes pcommon.Map + isTrace bool + expectedAttributeValue string + }{ + { + "testAppendNoAttributesToTrace", + pcommon.NewMap(), + completeResourceAttributes, + true, + "", + }, { + "testAppendAttributesToMetricWithValuesFound", + 
pcommon.NewMap(), + completeResourceAttributes, + false, + "opentelemetry,0.0.1auto,go,Auto", + }, + { + "testAppendAttributesToMetricWithSomeValuesMissing", + pcommon.NewMap(), + incompleteResourceAttributes, + false, + "opentelemetry,0.0.1test,-,Manual", + }, + { + + "testAppendAttributesToMetricWithAllValuesMissing", + pcommon.NewMap(), + pcommon.NewMap(), + false, + "-,-,-,Manual", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := &attributesNormalizer{ + logger: logger, + } + n.normalizeTelemetryAttributes(tt.attributes, tt.resourceAttributes, tt.isTrace) + + if value, ok := tt.attributes.Get("Telemetry.SDK"); !ok { + if !tt.isTrace { + t.Errorf("attribute is not found.") + } + } else { + if tt.isTrace { + t.Errorf("unexpected attribute is found.") + } + assert.Equal(t, tt.expectedAttributeValue, value.Str()) + } + }) + } +} diff --git a/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner.go b/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner.go new file mode 100644 index 0000000000..5fcca636d4 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner.go @@ -0,0 +1,42 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package prune + +import ( + "errors" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" +) + +type MetricPruner struct { +} + +func (p *MetricPruner) ShouldBeDropped(attributes pcommon.Map) (bool, error) { + for _, attributeKey := range common.IndexableMetricAttributes { + if val, ok := attributes.Get(attributeKey); ok { + if !isAsciiPrintable(val.Str()) { + return true, errors.New("Metric attribute " + attributeKey + " must contain only ASCII characters.") + } + } + } + return false, nil +} + +func NewPruner() *MetricPruner { + return &MetricPruner{} +} + +func isAsciiPrintable(val string) bool { + nonWhitespaceFound := false + for _, c := range val { + if c < 32 || c > 126 { + return false + } else if !nonWhitespaceFound && c != 32 { + nonWhitespaceFound = true + } + } + return nonWhitespaceFound +} diff --git a/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner_test.go b/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner_test.go new file mode 100644 index 0000000000..4a14c5e9a7 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/prune/metric_pruner_test.go @@ -0,0 +1,85 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package prune + +import ( + "testing" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" +) + +func TestMetricPrunerWithIndexableAttribute(t *testing.T) { + tests := []struct { + name string + val string + want bool + }{ + { + "testShouldDropChineseChar", + "漢", + true, + }, { + "testShouldDropSymbolChar", + "€, £, µ", + true, + }, { + "testShouldDropAllBlackSpace", + " ", + true, + }, + { + "testShouldDropAllTab", + " ", + true, + }, { + "testShouldKeepEnglishWord", + "abcdefg-", + false, + }, + } + + p := &MetricPruner{} + for _, tt := range tests { + attributes := pcommon.NewMap() + attributes.PutStr(common.MetricAttributeLocalService, tt.val) + t.Run(tt.name, func(t *testing.T) { + got, _ := p.ShouldBeDropped(attributes) + if got != tt.want { + t.Errorf("ShouldBeDropped() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMetricPrunerWithNonIndexableAttribute(t *testing.T) { + tests := []struct { + name string + val string + want bool + }{ + { + "testShouldKeepChineseChar", + "漢", + false, + }, { + "testShouldKeepEnglishWord", + "abcdefg-", + false, + }, + } + + p := &MetricPruner{} + for _, tt := range tests { + attributes := pcommon.NewMap() + attributes.PutStr(common.AttributeEC2InstanceId, tt.val) + t.Run(tt.name, func(t *testing.T) { + got, _ := p.ShouldBeDropped(attributes) + if got != tt.want { + t.Errorf("ShouldBeDropped() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver.go b/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver.go new file mode 100644 index 0000000000..cab4106676 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver.go @@ -0,0 +1,163 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" +) + +const ( + AttributeEnvironmentDefault = "default" + + AttributePlatformGeneric = "Generic" + AttributePlatformEC2 = "AWS::EC2" + AttributePlatformEKS = "AWS::EKS" + AttributePlatformK8S = "K8s" +) + +var GenericInheritedAttributes = map[string]string{ + semconv.AttributeDeploymentEnvironment: attr.AWSLocalEnvironment, + attr.ResourceDetectionHostName: common.AttributeHost, +} + +// DefaultInheritedAttributes is an allow-list that also renames attributes from the resource detection processor +var DefaultInheritedAttributes = map[string]string{ + semconv.AttributeDeploymentEnvironment: attr.AWSLocalEnvironment, + attr.ResourceDetectionASG: common.AttributeEC2AutoScalingGroup, + attr.ResourceDetectionHostId: common.AttributeEC2InstanceId, + attr.ResourceDetectionHostName: common.AttributeHost, +} + +type subResolver interface { + Process(attributes, resourceAttributes pcommon.Map) error + Stop(ctx context.Context) error +} + +type attributesResolver struct { + subResolvers []subResolver +} + +// create a new attributes resolver +func NewAttributesResolver(resolvers []appsignalsconfig.Resolver, logger *zap.Logger) *attributesResolver { + subResolvers := []subResolver{} + for _, resolver := range resolvers { + switch resolver.Platform { + case appsignalsconfig.PlatformEKS, appsignalsconfig.PlatformK8s: + subResolvers = append(subResolvers, 
getKubernetesResolver(resolver.Platform, resolver.Name, logger), newKubernetesResourceAttributesResolver(resolver.Platform, resolver.Name)) + case appsignalsconfig.PlatformEC2: + subResolvers = append(subResolvers, newResourceAttributesResolver(resolver.Platform, AttributePlatformEC2, DefaultInheritedAttributes)) + default: + if ecsutil.GetECSUtilSingleton().IsECS() { + subResolvers = append(subResolvers, newResourceAttributesResolver(appsignalsconfig.PlatformECS, AttributePlatformGeneric, DefaultInheritedAttributes)) + } else { + subResolvers = append(subResolvers, newResourceAttributesResolver(resolver.Platform, AttributePlatformGeneric, GenericInheritedAttributes)) + } + } + } + return &attributesResolver{ + subResolvers: subResolvers, + } +} + +// Process the attributes +func (r *attributesResolver) Process(attributes, resourceAttributes pcommon.Map, _ bool) error { + for _, subResolver := range r.subResolvers { + if err := subResolver.Process(attributes, resourceAttributes); err != nil { + return err + } + } + return nil +} + +func (r *attributesResolver) Stop(ctx context.Context) error { + var errs error + for _, subResolver := range r.subResolvers { + errs = errors.Join(errs, subResolver.Stop(ctx)) + } + return errs +} + +type resourceAttributesResolver struct { + defaultEnvPrefix string + platformType string + attributeMap map[string]string +} + +func newResourceAttributesResolver(defaultEnvPrefix, platformType string, attributeMap map[string]string) *resourceAttributesResolver { + return &resourceAttributesResolver{ + defaultEnvPrefix: defaultEnvPrefix, + platformType: platformType, + attributeMap: attributeMap, + } +} +func (h *resourceAttributesResolver) Process(attributes, resourceAttributes pcommon.Map) error { + for attrKey, mappingKey := range h.attributeMap { + if val, ok := resourceAttributes.Get(attrKey); ok { + attributes.PutStr(mappingKey, val.Str()) + } + } + attributes.PutStr(attr.AWSLocalEnvironment, getLocalEnvironment(attributes, 
resourceAttributes, h.defaultEnvPrefix)) + attributes.PutStr(common.AttributePlatformType, h.platformType) + return nil +} + +func getLocalEnvironment(attributes, resourceAttributes pcommon.Map, defaultEnvPrefix string) string { + if val, ok := attributes.Get(attr.AWSLocalEnvironment); ok { + return val.Str() + } + if val, found := resourceAttributes.Get(attr.AWSHostedInEnvironment); found { + return val.Str() + } + if defaultEnvPrefix == appsignalsconfig.PlatformECS { + if clusterName, _ := getECSClusterName(resourceAttributes); clusterName != "" { + return getDefaultEnvironment(defaultEnvPrefix, clusterName) + } + if clusterName := ecsutil.GetECSUtilSingleton().Cluster; clusterName != "" { + return getDefaultEnvironment(defaultEnvPrefix, clusterName) + } + } else if defaultEnvPrefix == appsignalsconfig.PlatformEC2 { + if asgAttr, found := resourceAttributes.Get(attr.ResourceDetectionASG); found { + return getDefaultEnvironment(defaultEnvPrefix, asgAttr.Str()) + } + } + return getDefaultEnvironment(defaultEnvPrefix, AttributeEnvironmentDefault) +} + +func getECSClusterName(resourceAttributes pcommon.Map) (string, bool) { + if clusterAttr, ok := resourceAttributes.Get(semconv.AttributeAWSECSClusterARN); ok { + parts := strings.Split(clusterAttr.Str(), "/") + clusterName := parts[len(parts)-1] + return clusterName, true + } else if taskAttr, ok := resourceAttributes.Get(semconv.AttributeAWSECSTaskARN); ok { + parts := strings.SplitAfterN(taskAttr.Str(), ":task/", 2) + if len(parts) == 2 { + taskParts := strings.Split(parts[1], "/") + // cluster name in ARN + if len(taskParts) == 2 { + return taskParts[0], true + } + } + } + return "", false +} + +func getDefaultEnvironment(platformCode, val string) string { + return fmt.Sprintf("%s:%s", platformCode, val) +} + +func (h *resourceAttributesResolver) Stop(ctx context.Context) error { + return nil +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver_test.go 
b/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver_test.go new file mode 100644 index 0000000000..cf8fd05f57 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/attributesresolver_test.go @@ -0,0 +1,247 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.opentelemetry.io/collector/pdata/pcommon" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" +) + +type MockSubResolver struct { + mock.Mock +} + +func (m *MockSubResolver) Process(attributes, resourceAttributes pcommon.Map) error { + args := m.Called(attributes, resourceAttributes) + return args.Error(0) +} + +func (m *MockSubResolver) Stop(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func TestResourceAttributesResolverWithNoConfiguredName(t *testing.T) { + tests := []struct { + name string + platformCode string + platformType string + resolver config.Resolver + }{ + { + "testOnGeneric", + config.PlatformGeneric, + AttributePlatformGeneric, + config.NewGenericResolver(""), + }, + { + "testOnEC2", + config.PlatformEC2, + AttributePlatformEC2, + config.NewEC2Resolver(""), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + attributesResolver := NewAttributesResolver([]config.Resolver{tt.resolver}, logger) + resolver := attributesResolver.subResolvers[0] + + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + + 
resolver.Process(attributes, resourceAttributes) + + attribute, ok := attributes.Get(common.AttributePlatformType) + assert.True(t, ok) + assert.Equal(t, tt.platformType, attribute.Str()) + + attribute, ok = attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, tt.platformCode+":default", attribute.Str()) + }) + } +} + +func TestResourceAttributesResolverWithECSClusterName(t *testing.T) { + resolver := resourceAttributesResolver{ + defaultEnvPrefix: "ecs", + platformType: "Generic", + attributeMap: DefaultInheritedAttributes, + } + + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + resourceAttributes.PutStr(semconv.AttributeAWSECSTaskARN, "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/10838bed-421f-43ef-870a-f43feacbbb5b") + + resolver.Process(attributes, resourceAttributes) + + attribute, ok := attributes.Get(common.AttributePlatformType) + assert.True(t, ok) + assert.Equal(t, "Generic", attribute.Str()) + + attribute, ok = attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, "ecs:my-cluster", attribute.Str()) +} + +func TestResourceAttributesResolverWithOnEC2WithASG(t *testing.T) { + logger, _ := zap.NewDevelopment() + attributesResolver := NewAttributesResolver([]config.Resolver{config.NewEC2Resolver("")}, logger) + resolver := attributesResolver.subResolvers[0] + + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + resourceAttributes.PutStr(attr.ResourceDetectionASG, "my-asg") + + resolver.Process(attributes, resourceAttributes) + platformAttr, ok := attributes.Get(common.AttributePlatformType) + assert.True(t, ok) + assert.Equal(t, "AWS::EC2", platformAttr.Str()) + envAttr, ok := attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, "ec2:my-asg", envAttr.Str()) +} + +func TestResourceAttributesResolverWithHostname(t *testing.T) { + logger, _ := zap.NewDevelopment() + attributesResolver := 
NewAttributesResolver([]config.Resolver{config.NewGenericResolver("")}, logger) + resolver := attributesResolver.subResolvers[0] + + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + resourceAttributes.PutStr(attr.ResourceDetectionHostName, "hostname") + + resolver.Process(attributes, resourceAttributes) + envAttr, ok := attributes.Get(common.AttributeHost) + assert.True(t, ok) + assert.Equal(t, "hostname", envAttr.AsString()) +} + +func TestResourceAttributesResolverWithCustomEnvironment(t *testing.T) { + tests := []struct { + name string + platformCode string + resolver config.Resolver + }{ + { + "testOnGeneric", + config.PlatformGeneric, + config.NewGenericResolver(""), + }, + { + "testOnEC2", + config.PlatformEC2, + config.NewEC2Resolver(""), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + attributesResolver := NewAttributesResolver([]config.Resolver{tt.resolver}, logger) + resolver := attributesResolver.subResolvers[0] + + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + // insert default env + resourceAttributes.PutStr(attr.ResourceDetectionASG, "my-asg") + resourceAttributes.PutStr(semconv.AttributeAWSECSTaskARN, "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/10838bed-421f-43ef-870a-f43feacbbb5b") + + // insert custom env + resourceAttributes.PutStr(attr.AWSHostedInEnvironment, "env1") + resolver.Process(attributes, resourceAttributes) + envAttr, ok := attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, "env1", envAttr.Str()) + + attributes = pcommon.NewMap() + resourceAttributes = pcommon.NewMap() + + resourceAttributes.PutStr(attr.AWSHostedInEnvironment, "error") + resourceAttributes.PutStr(semconv.AttributeDeploymentEnvironment, "env2") + resolver.Process(attributes, resourceAttributes) + envAttr, ok = attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, "env2", envAttr.Str()) + + 
attributes = pcommon.NewMap() + resourceAttributes = pcommon.NewMap() + + resourceAttributes.PutStr(semconv.AttributeDeploymentEnvironment, "env3") + resolver.Process(attributes, resourceAttributes) + envAttr, ok = attributes.Get(attr.AWSLocalEnvironment) + assert.True(t, ok) + assert.Equal(t, "env3", envAttr.Str()) + }) + } +} + +func TestAttributesResolver_Process(t *testing.T) { + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + + mockSubResolver1 := new(MockSubResolver) + mockSubResolver1.On("Process", attributes, resourceAttributes).Return(nil) + + mockSubResolver2 := new(MockSubResolver) + mockSubResolver2.On("Process", attributes, resourceAttributes).Return(errors.New("error")) + + r := &attributesResolver{ + subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, + } + + err := r.Process(attributes, resourceAttributes, true) + assert.Error(t, err) + mockSubResolver1.AssertExpectations(t) + mockSubResolver2.AssertExpectations(t) +} + +func TestAttributesResolver_Stop(t *testing.T) { + ctx := context.Background() + + mockSubResolver1 := new(MockSubResolver) + mockSubResolver1.On("Stop", ctx).Return(nil) + + mockSubResolver2 := new(MockSubResolver) + mockSubResolver2.On("Stop", ctx).Return(errors.New("error")) + + r := &attributesResolver{ + subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, + } + + err := r.Stop(ctx) + assert.Error(t, err) + mockSubResolver1.AssertExpectations(t) + mockSubResolver2.AssertExpectations(t) +} + +func TestGetClusterName(t *testing.T) { + resourceAttributes := pcommon.NewMap() + resourceAttributes.PutStr(semconv.AttributeAWSECSClusterARN, "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster") + clusterName, ok := getECSClusterName(resourceAttributes) + assert.True(t, ok) + assert.Equal(t, "my-cluster", clusterName) + + resourceAttributes = pcommon.NewMap() + resourceAttributes.PutStr(semconv.AttributeAWSECSTaskARN, 
"arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b") + _, ok = getECSClusterName(resourceAttributes) + assert.False(t, ok) + + resourceAttributes = pcommon.NewMap() + resourceAttributes.PutStr(semconv.AttributeAWSECSTaskARN, "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/10838bed-421f-43ef-870a-f43feacbbb5b") + clusterName, ok = getECSClusterName(resourceAttributes) + assert.True(t, ok) + assert.Equal(t, "my-cluster", clusterName) +} diff --git a/plugins/processors/awsappsignals/internal/resolver/kubernetes.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go similarity index 90% rename from plugins/processors/awsappsignals/internal/resolver/kubernetes.go rename to plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go index 37b83cc975..fe5712976b 100644 --- a/plugins/processors/awsappsignals/internal/resolver/kubernetes.go +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go @@ -17,7 +17,7 @@ import ( mapset "github.com/deckarep/golang-set/v2" "go.opentelemetry.io/collector/pdata/pcommon" - semconv "go.opentelemetry.io/collector/semconv/v1.17.0" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/informers" @@ -25,8 +25,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" - attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" ) const ( @@ -48,10 +49,6 @@ const ( jitterKubernetesAPISeconds = 10 ) -var DefaultHostedInAttributeMap = map[string]string{ - 
semconv.AttributeK8SNamespaceName: attr.HostedInK8SNamespace, -} - var ( // ReplicaSet name = Deployment name + "-" + up to 10 alphanumeric characters string, if the ReplicaSet was created through a deployment // The suffix string of the ReplicaSet name is an int32 number (0 to 4,294,967,295) that is cast to a string and then @@ -71,6 +68,8 @@ var ( type kubernetesResolver struct { logger *zap.Logger clientset kubernetes.Interface + clusterName string + platformCode string ipToPod *sync.Map podToWorkloadAndNamespace *sync.Map ipToServiceAndNamespace *sync.Map @@ -509,7 +508,7 @@ func (m *ServiceToWorkloadMapper) Start(stopCh chan struct{}) { }() } -func getKubernetesResolver(logger *zap.Logger) subResolver { +func getKubernetesResolver(platformCode, clusterName string, logger *zap.Logger) subResolver { once.Do(func() { config, err := clientcmd.BuildConfigFromFlags("", "") if err != nil { @@ -547,6 +546,8 @@ func getKubernetesResolver(logger *zap.Logger) subResolver { instance = &kubernetesResolver{ logger: logger, clientset: clientset, + clusterName: clusterName, + platformCode: platformCode, ipToServiceAndNamespace: serviceWatcher.ipToServiceAndNamespace, serviceAndNamespaceToSelectors: serviceWatcher.serviceAndNamespaceToSelectors, ipToPod: podWatcher.ipToPod, @@ -589,13 +590,14 @@ func (e *kubernetesResolver) GetWorkloadAndNamespaceByIP(ip string) (string, str } func (e *kubernetesResolver) Process(attributes, resourceAttributes pcommon.Map) error { + var namespace string if value, ok := attributes.Get(attr.AWSRemoteService); ok { valueStr := value.AsString() ipStr := "" if ip, _, ok := extractIPPort(valueStr); ok { - if workload, namespace, err := e.GetWorkloadAndNamespaceByIP(valueStr); err == nil { + if workload, ns, err := e.GetWorkloadAndNamespaceByIP(valueStr); err == nil { attributes.PutStr(attr.AWSRemoteService, workload) - attributes.PutStr(attr.K8SRemoteNamespace, namespace) + namespace = ns } else { ipStr = ip } @@ -604,9 +606,9 @@ func (e 
*kubernetesResolver) Process(attributes, resourceAttributes pcommon.Map) } if ipStr != "" { - if workload, namespace, err := e.GetWorkloadAndNamespaceByIP(ipStr); err == nil { + if workload, ns, err := e.GetWorkloadAndNamespaceByIP(ipStr); err == nil { attributes.PutStr(attr.AWSRemoteService, workload) - attributes.PutStr(attr.K8SRemoteNamespace, namespace) + namespace = ns } else { e.logger.Debug("failed to Process ip", zap.String("ip", ipStr), zap.Error(err)) attributes.PutStr(attr.AWSRemoteService, "UnknownRemoteService") @@ -614,6 +616,12 @@ func (e *kubernetesResolver) Process(attributes, resourceAttributes pcommon.Map) } } + if _, ok := attributes.Get(attr.AWSRemoteEnvironment); !ok { + if namespace != "" { + attributes.PutStr(attr.AWSRemoteEnvironment, fmt.Sprintf("%s:%s/%s", e.platformCode, e.clusterName, namespace)) + } + } + return nil } @@ -659,35 +667,56 @@ func getHostNetworkPorts(pod *corev1.Pod) []string { return ports } -type kubernetesHostedInAttributeResolver struct { +type kubernetesResourceAttributesResolver struct { + platformCode string clusterName string attributeMap map[string]string } -func newKubernetesHostedInAttributeResolver(clusterName string) *kubernetesHostedInAttributeResolver { - return &kubernetesHostedInAttributeResolver{ - clusterName: clusterName, - attributeMap: map[string]string{ - semconv.AttributeK8SNamespaceName: attr.HostedInK8SNamespace, - }, +func newKubernetesResourceAttributesResolver(platformCode, clusterName string) *kubernetesResourceAttributesResolver { + return &kubernetesResourceAttributesResolver{ + platformCode: platformCode, + clusterName: clusterName, + attributeMap: DefaultInheritedAttributes, } } -func (h *kubernetesHostedInAttributeResolver) Process(attributes, resourceAttributes pcommon.Map) error { +func (h *kubernetesResourceAttributesResolver) Process(attributes, resourceAttributes pcommon.Map) error { for attrKey, mappingKey := range h.attributeMap { if val, ok := resourceAttributes.Get(attrKey); ok 
{ attributes.PutStr(mappingKey, val.AsString()) } } + if h.platformCode == config.PlatformEKS { + attributes.PutStr(common.AttributePlatformType, AttributePlatformEKS) + attributes.PutStr(common.AttributeEKSClusterName, h.clusterName) + } else { + attributes.PutStr(common.AttributePlatformType, AttributePlatformK8S) + attributes.PutStr(common.AttributeK8SClusterName, h.clusterName) + } + var namespace string + if nsAttr, ok := resourceAttributes.Get(semconv.AttributeK8SNamespaceName); ok { + namespace = nsAttr.Str() + } else { + namespace = "UnknownNamespace" + } - if isEks, _ := common.IsEKS(); isEks { - attributes.PutStr(attr.HostedInClusterNameEKS, h.clusterName) + if val, ok := attributes.Get(attr.AWSLocalEnvironment); !ok { + env := getDefaultEnvironment(h.platformCode, h.clusterName+"/"+namespace) + attributes.PutStr(attr.AWSLocalEnvironment, env) } else { - attributes.PutStr(attr.HostedInClusterNameK8s, h.clusterName) + attributes.PutStr(attr.AWSLocalEnvironment, val.Str()) } + attributes.PutStr(common.AttributeK8SNamespace, namespace) + //The application log group in Container Insights is a fixed pattern: + // "/aws/containerinsights/{Cluster_Name}/application" + // See https://github.com/aws/amazon-cloudwatch-agent-operator/blob/fe144bb02d7b1930715aa3ea32e57a5ff13406aa/helm/templates/fluent-bit-configmap.yaml#L82 + logGroupName := "/aws/containerinsights/" + h.clusterName + "/application" + resourceAttributes.PutStr(semconv.AttributeAWSLogGroupNames, logGroupName) + return nil } -func (h *kubernetesHostedInAttributeResolver) Stop(ctx context.Context) error { +func (h *kubernetesResourceAttributesResolver) Stop(ctx context.Context) error { return nil } diff --git a/plugins/processors/awsappsignals/internal/resolver/kubernetes_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go similarity index 80% rename from plugins/processors/awsappsignals/internal/resolver/kubernetes_test.go rename to 
plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go index 4ed7fff58e..20cb17a5cb 100644 --- a/plugins/processors/awsappsignals/internal/resolver/kubernetes_test.go +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go @@ -5,6 +5,7 @@ package resolver import ( "context" + "fmt" "strings" "sync" "testing" @@ -13,12 +14,15 @@ import ( mapset "github.com/deckarep/golang-set/v2" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) // MockDeleter deletes a key immediately, useful for testing. 
@@ -696,6 +700,7 @@ func TestEksResolver(t *testing.T) { t.Run("Test GetWorkloadAndNamespaceByIP", func(t *testing.T) { resolver := &kubernetesResolver{ logger: logger, + clusterName: "test", ipToPod: &sync.Map{}, podToWorkloadAndNamespace: &sync.Map{}, ipToServiceAndNamespace: &sync.Map{}, @@ -769,6 +774,8 @@ func TestEksResolver(t *testing.T) { logger, _ := zap.NewProduction() resolver := &kubernetesResolver{ logger: logger, + clusterName: "test", + platformCode: config.PlatformEKS, ipToPod: &sync.Map{}, podToWorkloadAndNamespace: &sync.Map{}, ipToServiceAndNamespace: &sync.Map{}, @@ -784,7 +791,7 @@ func TestEksResolver(t *testing.T) { err := resolver.Process(attributes, resourceAttributes) assert.NoError(t, err) assert.Equal(t, "test-deployment", getStrAttr(attributes, attr.AWSRemoteService, t)) - assert.Equal(t, "test-namespace", getStrAttr(attributes, attr.K8SRemoteNamespace, t)) + assert.Equal(t, "eks:test/test-namespace", getStrAttr(attributes, attr.AWSRemoteEnvironment, t)) // Test case 2: "aws.remote.service" contains only IP attributes = pcommon.NewMap() @@ -795,7 +802,7 @@ func TestEksResolver(t *testing.T) { err = resolver.Process(attributes, resourceAttributes) assert.NoError(t, err) assert.Equal(t, "test-deployment-2", getStrAttr(attributes, attr.AWSRemoteService, t)) - assert.Equal(t, "test-namespace-2", getStrAttr(attributes, attr.K8SRemoteNamespace, t)) + assert.Equal(t, "eks:test/test-namespace-2", getStrAttr(attributes, attr.AWSRemoteEnvironment, t)) // Test case 3: "aws.remote.service" contains non-ip string attributes = pcommon.NewMap() @@ -815,8 +822,9 @@ func TestEksResolver(t *testing.T) { }) } -func TestHostedInEksResolver(t *testing.T) { - common.NewDetector = common.TestEKSDetector +func TestK8sResourceAttributesResolverOnEKS(t *testing.T) { + eksdetector.NewDetector = eksdetector.TestEKSDetector + eksdetector.IsEKS = eksdetector.TestIsEKSCacheEKS // helper function to get string values from the attributes getStrAttr := func(attributes 
pcommon.Map, key string, t *testing.T) string { if value, ok := attributes.Get(key); ok { @@ -827,17 +835,222 @@ func TestHostedInEksResolver(t *testing.T) { } } - resolver := newKubernetesHostedInAttributeResolver("test-cluster") - - // Test case 1 and 2: resourceAttributes contains "k8s.namespace.name" and EKS cluster name - attributes := pcommon.NewMap() - resourceAttributes := pcommon.NewMap() - resourceAttributes.PutStr("cloud.provider", "aws") - resourceAttributes.PutStr("k8s.namespace.name", "test-namespace-3") - err := resolver.Process(attributes, resourceAttributes) - assert.NoError(t, err) - assert.Equal(t, "test-namespace-3", getStrAttr(attributes, attr.HostedInK8SNamespace, t)) - assert.Equal(t, "test-cluster", getStrAttr(attributes, attr.HostedInClusterNameEKS, t)) + resolver := newKubernetesResourceAttributesResolver(config.PlatformEKS, "test-cluster") + + resourceAttributesBase := map[string]string{ + "cloud.provider": "aws", + "k8s.namespace.name": "test-namespace-3", + "host.id": "instance-id", + "host.name": "hostname", + "ec2.tag.aws:autoscaling:groupName": "asg", + } + + tests := []struct { + name string + resourceAttributesOverwrite map[string]string + expectedAttributes map[string]string + }{ + { + "testDefault", + map[string]string{}, + + map[string]string{ + attr.AWSLocalEnvironment: "eks:test-cluster/test-namespace-3", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeEKSClusterName: "test-cluster", + common.AttributeEC2InstanceId: "instance-id", + common.AttributeHost: "hostname", + common.AttributeEC2AutoScalingGroup: "asg", + }, + }, + { + "testOverwrite", + map[string]string{ + semconv.AttributeDeploymentEnvironment: "custom-env", + }, + map[string]string{ + attr.AWSLocalEnvironment: "custom-env", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeEKSClusterName: "test-cluster", + common.AttributeEC2InstanceId: "instance-id", + common.AttributeHost: "hostname", + 
common.AttributeEC2AutoScalingGroup: "asg", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + for key, val := range resourceAttributesBase { + resourceAttributes.PutStr(key, val) + } + for key, val := range tt.resourceAttributesOverwrite { + resourceAttributes.PutStr(key, val) + } + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + + for key, val := range tt.expectedAttributes { + assert.Equal(t, val, getStrAttr(attributes, key, t), fmt.Sprintf("expected %s for key %s", val, key)) + } + assert.Equal(t, "/aws/containerinsights/test-cluster/application", getStrAttr(resourceAttributes, semconv.AttributeAWSLogGroupNames, t)) + }) + } +} + +func TestK8sResourceAttributesResolverOnK8S(t *testing.T) { + eksdetector.NewDetector = eksdetector.TestK8sDetector + eksdetector.IsEKS = eksdetector.TestIsEKSCacheK8s + // helper function to get string values from the attributes + getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { + if value, ok := attributes.Get(key); ok { + return value.AsString() + } else { + t.Errorf("Failed to get value for key: %s", key) + return "" + } + } + + resolver := newKubernetesResourceAttributesResolver(config.PlatformK8s, "test-cluster") + + resourceAttributesBase := map[string]string{ + "cloud.provider": "aws", + "k8s.namespace.name": "test-namespace-3", + "host.id": "instance-id", + "host.name": "hostname", + "ec2.tag.aws:autoscaling:groupName": "asg", + } + + tests := []struct { + name string + resourceAttributesOverwrite map[string]string + expectedAttributes map[string]string + }{ + { + "testDefaultOnK8s", + map[string]string{}, + + map[string]string{ + attr.AWSLocalEnvironment: "k8s:test-cluster/test-namespace-3", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeK8SClusterName: "test-cluster", + common.AttributeEC2InstanceId: "instance-id", + 
common.AttributeHost: "hostname", + common.AttributeEC2AutoScalingGroup: "asg", + }, + }, + { + "testOverwriteOnK8s", + map[string]string{ + semconv.AttributeDeploymentEnvironment: "custom-env", + }, + map[string]string{ + attr.AWSLocalEnvironment: "custom-env", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeK8SClusterName: "test-cluster", + common.AttributeEC2InstanceId: "instance-id", + common.AttributeHost: "hostname", + common.AttributeEC2AutoScalingGroup: "asg", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + for key, val := range resourceAttributesBase { + resourceAttributes.PutStr(key, val) + } + for key, val := range tt.resourceAttributesOverwrite { + resourceAttributes.PutStr(key, val) + } + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + + for key, val := range tt.expectedAttributes { + assert.Equal(t, val, getStrAttr(attributes, key, t), fmt.Sprintf("expected %s for key %s", val, key)) + } + assert.Equal(t, "/aws/containerinsights/test-cluster/application", getStrAttr(resourceAttributes, semconv.AttributeAWSLogGroupNames, t)) + }) + } +} + +func TestK8sResourceAttributesResolverOnK8SOnPrem(t *testing.T) { + eksdetector.NewDetector = eksdetector.TestK8sDetector + // helper function to get string values from the attributes + getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { + if value, ok := attributes.Get(key); ok { + return value.AsString() + } else { + t.Errorf("Failed to get value for key: %s", key) + return "" + } + } + + resolver := newKubernetesResourceAttributesResolver(config.PlatformK8s, "test-cluster") + + resourceAttributesBase := map[string]string{ + "cloud.provider": "aws", + "k8s.namespace.name": "test-namespace-3", + "host.name": "hostname", + } + + tests := []struct { + name string + resourceAttributesOverwrite map[string]string + 
expectedAttributes map[string]string + }{ + { + "testDefault", + map[string]string{}, + + map[string]string{ + attr.AWSLocalEnvironment: "k8s:test-cluster/test-namespace-3", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeK8SClusterName: "test-cluster", + common.AttributeHost: "hostname", + }, + }, + { + "testOverwrite", + map[string]string{ + semconv.AttributeDeploymentEnvironment: "custom-env", + }, + map[string]string{ + attr.AWSLocalEnvironment: "custom-env", + common.AttributeK8SNamespace: "test-namespace-3", + common.AttributeK8SClusterName: "test-cluster", + common.AttributeHost: "hostname", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + for key, val := range resourceAttributesBase { + resourceAttributes.PutStr(key, val) + } + for key, val := range tt.resourceAttributesOverwrite { + resourceAttributes.PutStr(key, val) + } + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + + for key, val := range tt.expectedAttributes { + assert.Equal(t, val, getStrAttr(attributes, key, t), fmt.Sprintf("expected %s for key %s", val, key)) + } + assert.Equal(t, "/aws/containerinsights/test-cluster/application", getStrAttr(resourceAttributes, semconv.AttributeAWSLogGroupNames, t)) + + // EC2 related fields that should not exist for on-prem + _, exists := attributes.Get(common.AttributeEC2AutoScalingGroup) + assert.False(t, exists) + + _, exists = attributes.Get(common.AttributeEC2InstanceId) + assert.False(t, exists) + }) + } } func TestExtractIPPort(t *testing.T) { diff --git a/plugins/processors/awsappsignals/processor.go b/plugins/processors/awsapplicationsignals/processor.go similarity index 63% rename from plugins/processors/awsappsignals/processor.go rename to plugins/processors/awsapplicationsignals/processor.go index 3fe73e356e..8fc3e25bb7 100644 --- a/plugins/processors/awsappsignals/processor.go +++ 
b/plugins/processors/awsapplicationsignals/processor.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package awsappsignals +package awsapplicationsignals import ( "context" @@ -11,18 +11,24 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" + "golang.org/x/text/cases" + "golang.org/x/text/language" - appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/normalizer" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/resolver" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" + appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/cardinalitycontrol" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/normalizer" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/prune" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/resolver" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/rules" ) const ( - failedToProcessAttribute = "failed to process attributes" - failedToProcessAttributeWithCustomRule = "failed to process attributes with custom rule, will drop the metric" + failedToProcessAttribute = "failed to process attributes" + failedToProcessAttributeWithLimiter = "failed to process attributes with limiter, keep the data" ) +var metricCaser = cases.Title(language.English) + // this is used to Process some attributes (like IP addresses) to a generic form to reduce high cardinality type attributesMutator interface { Process(attributes, 
resourceAttributes pcommon.Map, isTrace bool) error @@ -36,35 +42,58 @@ type stopper interface { Stop(context.Context) error } -type awsappsignalsprocessor struct { +type awsapplicationsignalsprocessor struct { logger *zap.Logger config *appsignalsconfig.Config replaceActions *rules.ReplaceActions allowlistMutators []allowListMutator metricMutators []attributesMutator traceMutators []attributesMutator + limiter cardinalitycontrol.Limiter stoppers []stopper } -func (ap *awsappsignalsprocessor) Start(_ context.Context, _ component.Host) error { +func (ap *awsapplicationsignalsprocessor) StartMetrics(ctx context.Context, _ component.Host) error { attributesResolver := resolver.NewAttributesResolver(ap.config.Resolvers, ap.logger) ap.stoppers = []stopper{attributesResolver} - ap.metricMutators = []attributesMutator{attributesResolver} - attributesNormalizer := normalizer.NewAttributesNormalizer(ap.logger) ap.metricMutators = []attributesMutator{attributesResolver, attributesNormalizer} - ap.replaceActions = rules.NewReplacer(ap.config.Rules) - ap.traceMutators = []attributesMutator{attributesResolver, attributesNormalizer, ap.replaceActions} + limiterConfig := ap.config.Limiter + if limiterConfig == nil { + limiterConfig = appsignalsconfig.NewDefaultLimiterConfig() + } + if limiterConfig.ParentContext == nil { + limiterConfig.ParentContext = ctx + } - keeper := rules.NewKeeper(ap.config.Rules) + if !limiterConfig.Disabled { + ap.limiter = cardinalitycontrol.NewMetricsLimiter(limiterConfig, ap.logger) + } else { + ap.logger.Info("metrics limiter is disabled.") + } + + ap.replaceActions = rules.NewReplacer(ap.config.Rules, !limiterConfig.Disabled) + + pruner := prune.NewPruner() + keeper := rules.NewKeeper(ap.config.Rules, !limiterConfig.Disabled) dropper := rules.NewDropper(ap.config.Rules) - ap.allowlistMutators = []allowListMutator{keeper, dropper} + ap.allowlistMutators = []allowListMutator{pruner, keeper, dropper} return nil } -func (ap *awsappsignalsprocessor) 
Shutdown(ctx context.Context) error { +func (ap *awsapplicationsignalsprocessor) StartTraces(_ context.Context, _ component.Host) error { + attributesResolver := resolver.NewAttributesResolver(ap.config.Resolvers, ap.logger) + attributesNormalizer := normalizer.NewAttributesNormalizer(ap.logger) + customReplacer := rules.NewReplacer(ap.config.Rules, false) + + ap.stoppers = append(ap.stoppers, attributesResolver) + ap.traceMutators = append(ap.traceMutators, attributesResolver, attributesNormalizer, customReplacer) + return nil +} + +func (ap *awsapplicationsignalsprocessor) Shutdown(ctx context.Context) error { for _, stopper := range ap.stoppers { err := stopper.Stop(ctx) if err != nil { @@ -74,7 +103,7 @@ func (ap *awsappsignalsprocessor) Shutdown(ctx context.Context) error { return nil } -func (ap *awsappsignalsprocessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { +func (ap *awsapplicationsignalsprocessor) processTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) @@ -97,7 +126,7 @@ func (ap *awsappsignalsprocessor) processTraces(ctx context.Context, td ptrace.T return td, nil } -func (ap *awsappsignalsprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { +func (ap *awsapplicationsignalsprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rs := rms.At(i) @@ -108,6 +137,7 @@ func (ap *awsappsignalsprocessor) processMetrics(ctx context.Context, md pmetric metrics := ils.Metrics() for k := 0; k < metrics.Len(); k++ { m := metrics.At(k) + m.SetName(metricCaser.String(m.Name())) // Ensure metric name is in sentence case ap.processMetricAttributes(ctx, m, resourceAttributes) } } @@ -117,8 +147,7 @@ func (ap *awsappsignalsprocessor) processMetrics(ctx context.Context, md pmetric // Attributes are 
provided for each log and trace, but not at the metric level // Need to process attributes for every data point within a metric. -func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m pmetric.Metric, resourceAttribes pcommon.Map) { - +func (ap *awsapplicationsignalsprocessor) processMetricAttributes(_ context.Context, m pmetric.Metric, resourceAttribes pcommon.Map) { // This is a lot of repeated code, but since there is no single parent superclass // between metric data types, we can't use polymorphism. switch m.Type() { @@ -136,7 +165,7 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m for _, mutator := range ap.allowlistMutators { shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) if err != nil { - ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } if shouldBeDropped { return true @@ -150,6 +179,13 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } } + if ap.limiter != nil { + for i := 0; i < dps.Len(); i++ { + if _, err := ap.limiter.Admit(m.Name(), dps.At(i).Attributes(), resourceAttribes); err != nil { + ap.logger.Debug(failedToProcessAttributeWithLimiter, zap.Error(err)) + } + } + } case pmetric.MetricTypeSum: dps := m.Sum().DataPoints() for i := 0; i < dps.Len(); i++ { @@ -164,7 +200,7 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m for _, mutator := range ap.allowlistMutators { shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) if err != nil { - ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } if shouldBeDropped { return true @@ -178,6 +214,13 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m ap.logger.Debug(failedToProcessAttribute, 
zap.Error(err)) } } + if ap.limiter != nil { + for i := 0; i < dps.Len(); i++ { + if _, err := ap.limiter.Admit(m.Name(), dps.At(i).Attributes(), resourceAttribes); err != nil { + ap.logger.Debug(failedToProcessAttributeWithLimiter, zap.Error(err)) + } + } + } case pmetric.MetricTypeHistogram: dps := m.Histogram().DataPoints() for i := 0; i < dps.Len(); i++ { @@ -192,7 +235,7 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m for _, mutator := range ap.allowlistMutators { shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) if err != nil { - ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } if shouldBeDropped { return true @@ -206,6 +249,13 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } } + if ap.limiter != nil { + for i := 0; i < dps.Len(); i++ { + if _, err := ap.limiter.Admit(m.Name(), dps.At(i).Attributes(), resourceAttribes); err != nil { + ap.logger.Debug(failedToProcessAttributeWithLimiter, zap.Error(err)) + } + } + } case pmetric.MetricTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() for i := 0; i < dps.Len(); i++ { @@ -220,7 +270,7 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m for _, mutator := range ap.allowlistMutators { shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) if err != nil { - ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } if shouldBeDropped { return true @@ -234,6 +284,13 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } } + if ap.limiter != nil { + for i := 0; i < dps.Len(); i++ { + if _, err := ap.limiter.Admit(m.Name(), dps.At(i).Attributes(), resourceAttribes); err != 
nil { + ap.logger.Debug(failedToProcessAttributeWithLimiter, zap.Error(err)) + } + } + } case pmetric.MetricTypeSummary: dps := m.Summary().DataPoints() for i := 0; i < dps.Len(); i++ { @@ -248,7 +305,7 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m for _, mutator := range ap.allowlistMutators { shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) if err != nil { - ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } if shouldBeDropped { return true @@ -262,6 +319,13 @@ func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) } } + if ap.limiter != nil { + for i := 0; i < dps.Len(); i++ { + if _, err := ap.limiter.Admit(m.Name(), dps.At(i).Attributes(), resourceAttribes); err != nil { + ap.logger.Debug(failedToProcessAttributeWithLimiter, zap.Error(err)) + } + } + } default: ap.logger.Debug("Ignore unknown metric type", zap.String("type", m.Type().String())) } diff --git a/plugins/processors/awsappsignals/processor_test.go b/plugins/processors/awsapplicationsignals/processor_test.go similarity index 83% rename from plugins/processors/awsappsignals/processor_test.go rename to plugins/processors/awsapplicationsignals/processor_test.go index 60defaf078..630272cca4 100644 --- a/plugins/processors/awsappsignals/processor_test.go +++ b/plugins/processors/awsapplicationsignals/processor_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package awsappsignals +package awsapplicationsignals import ( "context" @@ -12,8 +12,8 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/rules" ) var testRules = []rules.Rule{ @@ -58,7 +58,7 @@ var testRules = []rules.Rule{ func TestProcessMetrics(t *testing.T) { logger, _ := zap.NewDevelopment() - ap := &awsappsignalsprocessor{ + ap := &awsapplicationsignalsprocessor{ logger: logger, config: &config.Config{ Resolvers: []config.Resolver{config.NewGenericResolver("")}, @@ -67,7 +67,7 @@ func TestProcessMetrics(t *testing.T) { } ctx := context.Background() - ap.Start(ctx, nil) + ap.StartMetrics(ctx, nil) keepMetrics := generateMetrics(map[string]string{ "dim_action": "reserved", @@ -100,9 +100,36 @@ func TestProcessMetrics(t *testing.T) { assert.True(t, isMetricNil(dropMetricsByKeep)) } +func TestProcessMetricsLowercase(t *testing.T) { + logger, _ := zap.NewDevelopment() + ap := &awsapplicationsignalsprocessor{ + logger: logger, + config: &config.Config{ + Resolvers: []config.Resolver{config.NewGenericResolver("")}, + Rules: testRules, + }, + } + + ctx := context.Background() + ap.StartMetrics(ctx, nil) + + lowercaseMetrics := pmetric.NewMetrics() + errorMetric := lowercaseMetrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + errorMetric.SetName("error") + latencyMetric := lowercaseMetrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + latencyMetric.SetName("latency") + faultMetric := lowercaseMetrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + 
faultMetric.SetName("fault") + + ap.processMetrics(ctx, lowercaseMetrics) + assert.Equal(t, "Error", lowercaseMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Name()) + assert.Equal(t, "Latency", lowercaseMetrics.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Name()) + assert.Equal(t, "Fault", lowercaseMetrics.ResourceMetrics().At(2).ScopeMetrics().At(0).Metrics().At(0).Name()) +} + func TestProcessTraces(t *testing.T) { logger, _ := zap.NewDevelopment() - ap := &awsappsignalsprocessor{ + ap := &awsapplicationsignalsprocessor{ logger: logger, config: &config.Config{ Resolvers: []config.Resolver{config.NewGenericResolver("")}, @@ -111,7 +138,7 @@ func TestProcessTraces(t *testing.T) { } ctx := context.Background() - ap.Start(ctx, nil) + ap.StartTraces(ctx, nil) traces := ptrace.NewTraces() span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() diff --git a/plugins/processors/awsappsignals/rules/common.go b/plugins/processors/awsapplicationsignals/rules/common.go similarity index 70% rename from plugins/processors/awsappsignals/rules/common.go rename to plugins/processors/awsapplicationsignals/rules/common.go index 4ac6ca8c14..8e432f20d1 100644 --- a/plugins/processors/awsappsignals/rules/common.go +++ b/plugins/processors/awsapplicationsignals/rules/common.go @@ -8,6 +8,9 @@ import ( "github.com/gobwas/glob" "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" ) type AllowListAction string @@ -46,10 +49,14 @@ type ActionItem struct { } var traceKeyMap = map[string]string{ - "Service": "aws.local.service", - "Operation": "aws.local.operation", - "RemoteService": "aws.remote.service", - "RemoteOperation": "aws.remote.operation", + common.MetricAttributeLocalService: attributes.AWSLocalService, + 
common.MetricAttributeEnvironment: attributes.AWSLocalEnvironment, + common.MetricAttributeLocalOperation: attributes.AWSLocalOperation, + common.MetricAttributeRemoteService: attributes.AWSRemoteService, + common.MetricAttributeRemoteEnvironment: attributes.AWSRemoteEnvironment, + common.MetricAttributeRemoteOperation: attributes.AWSRemoteOperation, + common.MetricAttributeRemoteResourceIdentifier: attributes.AWSRemoteResourceIdentifier, + common.MetricAttributeRemoteResourceType: attributes.AWSRemoteResourceType, } func GetAllowListAction(action string) (AllowListAction, error) { @@ -64,21 +71,17 @@ func GetAllowListAction(action string) (AllowListAction, error) { return "", errors.New("invalid action in rule") } -func getExactKey(metricDimensionKey string, isTrace bool) string { - if !isTrace { - return metricDimensionKey - } - traceDimensionKey, ok := traceKeyMap[metricDimensionKey] - if !ok { - // return original key if there is no matches - return metricDimensionKey +func convertToManagedAttributeKey(attributeKey string, isTrace bool) string { + val, ok := traceKeyMap[attributeKey] + if ok && isTrace { + return val } - return traceDimensionKey + return attributeKey } func matchesSelectors(attributes pcommon.Map, selectorMatchers []SelectorMatcherItem, isTrace bool) bool { for _, item := range selectorMatchers { - exactKey := getExactKey(item.Key, isTrace) + exactKey := convertToManagedAttributeKey(item.Key, isTrace) value, ok := attributes.Get(exactKey) if !ok { return false diff --git a/plugins/processors/awsapplicationsignals/rules/common_test.go b/plugins/processors/awsapplicationsignals/rules/common_test.go new file mode 100644 index 0000000000..40dade4386 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/rules/common_test.go @@ -0,0 +1,46 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package rules + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/internal/attributes" +) + +func generateTestAttributes(service string, operation string, remoteService string, remoteOperation string, + isTrace bool) pcommon.Map { + return generateAttributesWithEnv(service, operation, "", remoteService, remoteOperation, "", isTrace) +} + +func generateAttributesWithEnv(service string, operation string, environment string, + remoteService string, remoteOperation string, remoteEnvironment string, + isTrace bool) pcommon.Map { + attributes := pcommon.NewMap() + if isTrace { + attributes.PutStr(attr.AWSLocalService, service) + attributes.PutStr(attr.AWSLocalOperation, operation) + if environment != "" { + attributes.PutStr(attr.AWSLocalEnvironment, environment) + } + attributes.PutStr(attr.AWSRemoteService, remoteService) + attributes.PutStr(attr.AWSRemoteOperation, remoteOperation) + if remoteEnvironment != "" { + attributes.PutStr(attr.AWSRemoteEnvironment, remoteEnvironment) + } + } else { + attributes.PutStr(common.MetricAttributeLocalService, service) + attributes.PutStr(common.MetricAttributeLocalOperation, operation) + if environment != "" { + attributes.PutStr(common.MetricAttributeEnvironment, environment) + } + attributes.PutStr(common.MetricAttributeRemoteService, remoteService) + attributes.PutStr(common.MetricAttributeRemoteOperation, remoteOperation) + if remoteEnvironment != "" { + attributes.PutStr(common.MetricAttributeRemoteEnvironment, remoteEnvironment) + } + } + return attributes +} diff --git a/plugins/processors/awsappsignals/rules/dropper.go b/plugins/processors/awsapplicationsignals/rules/dropper.go similarity index 100% rename from plugins/processors/awsappsignals/rules/dropper.go rename to 
plugins/processors/awsapplicationsignals/rules/dropper.go diff --git a/plugins/processors/awsappsignals/rules/dropper_test.go b/plugins/processors/awsapplicationsignals/rules/dropper_test.go similarity index 100% rename from plugins/processors/awsappsignals/rules/dropper_test.go rename to plugins/processors/awsapplicationsignals/rules/dropper_test.go diff --git a/plugins/processors/awsappsignals/rules/keeper.go b/plugins/processors/awsapplicationsignals/rules/keeper.go similarity index 54% rename from plugins/processors/awsappsignals/rules/keeper.go rename to plugins/processors/awsapplicationsignals/rules/keeper.go index a8a44eb9d1..c4b65e999c 100644 --- a/plugins/processors/awsappsignals/rules/keeper.go +++ b/plugins/processors/awsapplicationsignals/rules/keeper.go @@ -3,15 +3,21 @@ package rules -import "go.opentelemetry.io/collector/pdata/pcommon" +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" +) type KeepActions struct { - Actions []ActionItem + Actions []ActionItem + markDataPointAsReserved bool } -func NewKeeper(rules []Rule) *KeepActions { +func NewKeeper(rules []Rule, markDataPointAsReserved bool) *KeepActions { return &KeepActions{ - Actions: generateActionDetails(rules, AllowListActionKeep), + Actions: generateActionDetails(rules, AllowListActionKeep), + markDataPointAsReserved: markDataPointAsReserved, } } @@ -22,6 +28,9 @@ func (k *KeepActions) ShouldBeDropped(attributes pcommon.Map) (bool, error) { } for _, element := range k.Actions { isMatched := matchesSelectors(attributes, element.SelectorMatchers, false) + if k.markDataPointAsReserved { + attributes.PutBool(common.AttributeTmpReserved, true) + } if isMatched { // keep the datapoint as one of the keep rules is matched return false, nil diff --git a/plugins/processors/awsappsignals/rules/keeper_test.go b/plugins/processors/awsapplicationsignals/rules/keeper_test.go similarity index 97% rename from 
plugins/processors/awsappsignals/rules/keeper_test.go rename to plugins/processors/awsapplicationsignals/rules/keeper_test.go index 7ce17ef867..258fef3426 100644 --- a/plugins/processors/awsappsignals/rules/keeper_test.go +++ b/plugins/processors/awsapplicationsignals/rules/keeper_test.go @@ -69,7 +69,7 @@ func TestKeeperProcessor(t *testing.T) { }, } - testKeeper := NewKeeper(config) + testKeeper := NewKeeper(config, false) assert.Equal(t, 1, len(testKeeper.Actions)) isTrace := false @@ -102,7 +102,7 @@ func TestKeeperProcessor(t *testing.T) { } func TestKeeperProcessorWithNilConfig(t *testing.T) { - testKeeper := NewKeeper(nil) + testKeeper := NewKeeper(nil, false) isTrace := false testCases := []TestCaseForKeeper{ @@ -141,7 +141,7 @@ func TestKeeperProcessorWithEmptyConfig(t *testing.T) { config := []Rule{} - testKeeper := NewKeeper(config) + testKeeper := NewKeeper(config, false) isTrace := false testCases := []TestCaseForKeeper{ diff --git a/plugins/processors/awsappsignals/rules/replacer.go b/plugins/processors/awsapplicationsignals/rules/replacer.go similarity index 59% rename from plugins/processors/awsappsignals/rules/replacer.go rename to plugins/processors/awsapplicationsignals/rules/replacer.go index 87daecfc46..fdae4ef3e1 100644 --- a/plugins/processors/awsappsignals/rules/replacer.go +++ b/plugins/processors/awsapplicationsignals/rules/replacer.go @@ -5,15 +5,19 @@ package rules import ( "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" ) type ReplaceActions struct { - Actions []ActionItem + Actions []ActionItem + markDataPointAsReserved bool } -func NewReplacer(rules []Rule) *ReplaceActions { +func NewReplacer(rules []Rule, markDataPointAsReserved bool) *ReplaceActions { return &ReplaceActions{ - generateActionDetails(rules, AllowListActionReplace), + Actions: generateActionDetails(rules, AllowListActionReplace), + markDataPointAsReserved: 
markDataPointAsReserved, } } @@ -32,23 +36,22 @@ func (r *ReplaceActions) Process(attributes, _ pcommon.Map, isTrace bool) error continue } for _, replacement := range element.Replacements { - targetDimensionKey := getExactKey(replacement.TargetDimension, isTrace) - // don't allow customer add new dimension key - _, isExist := attributes.Get(targetDimensionKey) - if !isExist { - continue - } + targetDimension := replacement.TargetDimension + + attr := convertToManagedAttributeKey(targetDimension, isTrace) // every replacement in one specific dimension only will be performed once - _, ok := finalRules[targetDimensionKey] - if ok { - continue + if _, visited := finalRules[attr]; !visited { + finalRules[attr] = replacement.Value } - finalRules[targetDimensionKey] = replacement.Value } } for key, value := range finalRules { attributes.PutStr(key, value) } + + if len(finalRules) > 0 && r.markDataPointAsReserved { + attributes.PutBool(common.AttributeTmpReserved, true) + } return nil } diff --git a/plugins/processors/awsappsignals/rules/replacer_test.go b/plugins/processors/awsapplicationsignals/rules/replacer_test.go similarity index 75% rename from plugins/processors/awsappsignals/rules/replacer_test.go rename to plugins/processors/awsapplicationsignals/rules/replacer_test.go index 39e36fc024..d777194550 100644 --- a/plugins/processors/awsappsignals/rules/replacer_test.go +++ b/plugins/processors/awsapplicationsignals/rules/replacer_test.go @@ -71,7 +71,7 @@ func TestReplacerProcess(t *testing.T) { }, } - testReplacer := NewReplacer(config) + testReplacer := NewReplacer(config, false) assert.Equal(t, 1, len(testReplacer.Actions)) testCases := []TestCaseForReplacer{ @@ -119,6 +119,77 @@ func TestReplacerProcess(t *testing.T) { } } +func TestAddManagedDimensionKey(t *testing.T) { + config := []Rule{ + { + Selectors: []Selector{ + { + Dimension: "Service", + Match: "app", + }, + { + Dimension: "RemoteService", + Match: "remote-app", + }, + }, + Replacements: 
[]Replacement{ + { + TargetDimension: "RemoteEnvironment", + Value: "test", + }, + }, + Action: "replace", + }, + } + + testReplacer := NewReplacer(config, false) + assert.Equal(t, 1, len(testReplacer.Actions)) + + testCases := []TestCaseForReplacer{ + { + name: "testAddMissingRemoteEnvironmentInMetric", + input: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "", false), + output: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "test", false), + isTrace: false, + }, + { + name: "testAddMissingRemoteEnvironmentInTrace", + input: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "", true), + output: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "test", true), + isTrace: true, + }, + { + name: "testReplaceRemoteEnvironmentInMetric", + input: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "error", false), + output: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "test", false), + isTrace: false, + }, + { + name: "testReplaceRemoteEnvironmentInTrace", + input: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "error", true), + output: generateAttributesWithEnv("app", "PUT /api/customer/owners/12345", "test", + "remote-app", "GET", "test", true), + isTrace: true, + }, + } + + testMapPlaceHolder := pcommon.NewMap() + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, testReplacer.Process(tt.input, testMapPlaceHolder, tt.isTrace)) + assert.Equal(t, tt.output, tt.input) + }) + } +} + func TestReplacerProcessWithPriority(t *testing.T) { config := []Rule{ @@ -170,7 +241,7 @@ func TestReplacerProcessWithPriority(t *testing.T) { }, } - testReplacer := NewReplacer(config) + 
testReplacer := NewReplacer(config, false) testMapPlaceHolder := pcommon.NewMap() testCases := []TestCaseForReplacer{ @@ -218,7 +289,7 @@ func TestReplacerProcessWithPriority(t *testing.T) { func TestReplacerProcessWithNilConfig(t *testing.T) { - testReplacer := NewReplacer(nil) + testReplacer := NewReplacer(nil, false) testMapPlaceHolder := pcommon.NewMap() testCases := []TestCaseForReplacer{ @@ -252,7 +323,7 @@ func TestReplacerProcessWithEmptyConfig(t *testing.T) { config := []Rule{} - testReplacer := NewReplacer(config) + testReplacer := NewReplacer(config, false) testMapPlaceHolder := pcommon.NewMap() testCases := []TestCaseForReplacer{ diff --git a/plugins/processors/awsappsignals/testdata/config_eks.yaml b/plugins/processors/awsapplicationsignals/testdata/config_eks.yaml similarity index 97% rename from plugins/processors/awsappsignals/testdata/config_eks.yaml rename to plugins/processors/awsapplicationsignals/testdata/config_eks.yaml index 2d00d37565..16b4c026f8 100644 --- a/plugins/processors/awsappsignals/testdata/config_eks.yaml +++ b/plugins/processors/awsapplicationsignals/testdata/config_eks.yaml @@ -1,4 +1,4 @@ -awsappsignals: +awsapplicationsignals: resolvers: - platform: eks name: test diff --git a/plugins/processors/awsappsignals/testdata/config_generic.yaml b/plugins/processors/awsapplicationsignals/testdata/config_generic.yaml similarity index 97% rename from plugins/processors/awsappsignals/testdata/config_generic.yaml rename to plugins/processors/awsapplicationsignals/testdata/config_generic.yaml index e875c741f4..ba3a82d532 100644 --- a/plugins/processors/awsappsignals/testdata/config_generic.yaml +++ b/plugins/processors/awsapplicationsignals/testdata/config_generic.yaml @@ -1,4 +1,4 @@ -awsappsignals: +awsapplicationsignals: resolvers: - platform: generic rules: diff --git a/plugins/processors/awsappsignals/config/config.go b/plugins/processors/awsappsignals/config/config.go deleted file mode 100644 index b39cda4a7c..0000000000 --- 
a/plugins/processors/awsappsignals/config/config.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package config - -import ( - "errors" - - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" -) - -type Config struct { - Resolvers []Resolver `mapstructure:"resolvers"` - Rules []rules.Rule `mapstructure:"rules"` -} - -func (cfg *Config) Validate() error { - if len(cfg.Resolvers) == 0 { - return errors.New("resolvers must not be empty") - } - for _, resolver := range cfg.Resolvers { - switch resolver.Platform { - case PlatformEKS: - if resolver.Name == "" { - return errors.New("name must not be empty for eks resolver") - } - case PlatformK8s: - if resolver.Name == "" { - return errors.New("name must not be empty for k8s resolver") - } - case PlatformGeneric: - default: - return errors.New("unknown resolver") - } - } - return nil -} diff --git a/plugins/processors/awsappsignals/internal/attributes/attributes.go b/plugins/processors/awsappsignals/internal/attributes/attributes.go deleted file mode 100644 index b878594e85..0000000000 --- a/plugins/processors/awsappsignals/internal/attributes/attributes.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package attributes - -const ( - // aws attributes - AWSLocalService = "aws.local.service" - AWSLocalOperation = "aws.local.operation" - AWSRemoteService = "aws.remote.service" - AWSRemoteOperation = "aws.remote.operation" - AWSRemoteTarget = "aws.remote.target" - AWSHostedInEnvironment = "aws.hostedin.environment" - - // kubernetes resource attributes - K8SDeploymentName = "k8s.deployment.name" - K8SStatefulSetName = "k8s.statefulset.name" - K8SDaemonSetName = "k8s.daemonset.name" - K8SJobName = "k8s.job.name" - K8SCronJobName = "k8s.cronjob.name" - K8SPodName = "k8s.pod.name" - K8SRemoteNamespace = "K8s.RemoteNamespace" - - // hosted in attribute names - HostedInClusterNameEKS = "HostedIn.EKS.Cluster" - HostedInClusterNameK8s = "HostedIn.K8s.Cluster" - HostedInK8SNamespace = "HostedIn.K8s.Namespace" - HostedInEnvironment = "HostedIn.Environment" -) diff --git a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go deleted file mode 100644 index 25a6af641e..0000000000 --- a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package normalizer - -import ( - "go.opentelemetry.io/collector/pdata/pcommon" - "go.uber.org/zap" - - attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" -) - -type attributesNormalizer struct { - logger *zap.Logger -} - -var renameMapForMetric = map[string]string{ - attr.AWSLocalService: "Service", - attr.AWSLocalOperation: "Operation", - attr.AWSRemoteService: "RemoteService", - attr.AWSRemoteOperation: "RemoteOperation", - attr.AWSRemoteTarget: "RemoteTarget", -} - -var renameMapForTrace = map[string]string{ - // these kubernetes resource attributes are set by the openTelemetry operator - // see the code references from upstream: - // * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245 - // * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43 - attr.K8SDeploymentName: "K8s.Workload", - attr.K8SStatefulSetName: "K8s.Workload", - attr.K8SDaemonSetName: "K8s.Workload", - attr.K8SJobName: "K8s.Workload", - attr.K8SCronJobName: "K8s.Workload", - attr.K8SPodName: "K8s.Pod", -} - -var copyMapForMetric = map[string]string{ - // these kubernetes resource attributes are set by the openTelemtry operator - // see the code referecnes from upstream: - // * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245 - // * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43 - attr.K8SDeploymentName: "K8s.Workload", - attr.K8SStatefulSetName: "K8s.Workload", - attr.K8SDaemonSetName: "K8s.Workload", - attr.K8SJobName: "K8s.Workload", - attr.K8SCronJobName: "K8s.Workload", - attr.K8SPodName: "K8s.Pod", -} - -func NewAttributesNormalizer(logger 
*zap.Logger) *attributesNormalizer { - return &attributesNormalizer{ - logger: logger, - } -} - -func (n *attributesNormalizer) Process(attributes, resourceAttributes pcommon.Map, isTrace bool) error { - n.copyResourceAttributesToAttributes(attributes, resourceAttributes, isTrace) - n.renameAttributes(attributes, resourceAttributes, isTrace) - return nil -} - -func (n *attributesNormalizer) renameAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) { - attrs := attributes - renameMap := renameMapForMetric - if isTrace { - attrs = resourceAttributes - renameMap = renameMapForTrace - } - - rename(attrs, renameMap) -} - -func (n *attributesNormalizer) copyResourceAttributesToAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) { - if isTrace { - return - } - for k, v := range copyMapForMetric { - if resourceAttrValue, ok := resourceAttributes.Get(k); ok { - // print some debug info when an attribute value is overwritten - if originalAttrValue, ok := attributes.Get(k); ok { - n.logger.Debug("attribute value is overwritten", zap.String("attribute", k), zap.String("original", originalAttrValue.AsString()), zap.String("new", resourceAttrValue.AsString())) - } - attributes.PutStr(v, resourceAttrValue.AsString()) - if k == attr.K8SPodName { - // only copy "host.id" from resource attributes to "K8s.Node" in attributesif the pod name is set - if host, ok := resourceAttributes.Get("host.id"); ok { - attributes.PutStr("K8s.Node", host.AsString()) - } - } - } - } -} - -func rename(attrs pcommon.Map, renameMap map[string]string) { - for original, replacement := range renameMap { - if value, ok := attrs.Get(original); ok { - attrs.PutStr(replacement, value.AsString()) - attrs.Remove(original) - if original == attr.K8SPodName { - // only rename host.id if the pod name is set - if host, ok := attrs.Get("host.id"); ok { - attrs.PutStr("K8s.Node", host.AsString()) - attrs.Remove("host.id") - } - } - } - } -} diff --git 
a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go deleted file mode 100644 index 4ca77b3f61..0000000000 --- a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package normalizer - -import ( - "testing" - - "go.opentelemetry.io/collector/pdata/pcommon" - "go.uber.org/zap" -) - -func TestRenameAttributes_for_metric(t *testing.T) { - logger, _ := zap.NewDevelopment() - normalizer := NewAttributesNormalizer(logger) - - // test for metric - // Create a pcommon.Map with some attributes - attributes := pcommon.NewMap() - for originalKey, replacementKey := range renameMapForMetric { - attributes.PutStr(originalKey, replacementKey+"-value") - } - - resourceAttributes := pcommon.NewMap() - // Call the process method - normalizer.renameAttributes(attributes, resourceAttributes, false) - - // Check that the original key has been removed - for originalKey := range renameMapForMetric { - if _, ok := attributes.Get(originalKey); ok { - t.Errorf("originalKey was not removed") - } - } - - // Check that the new key has the correct value - for _, replacementKey := range renameMapForMetric { - if value, ok := attributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { - t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") - } - } -} - -func TestRenameAttributes_for_trace(t *testing.T) { - logger, _ := zap.NewDevelopment() - normalizer := NewAttributesNormalizer(logger) - - // test for trace - // Create a pcommon.Map with some attributes - resourceAttributes := pcommon.NewMap() - for originalKey, replacementKey := range renameMapForTrace { - resourceAttributes.PutStr(originalKey, replacementKey+"-value") - } - 
resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") - - attributes := pcommon.NewMap() - // Call the process method - normalizer.renameAttributes(attributes, resourceAttributes, true) - - // Check that the original key has been removed - for originalKey := range renameMapForTrace { - if _, ok := resourceAttributes.Get(originalKey); ok { - t.Errorf("originalKey was not removed") - } - } - - // Check that the new key has the correct value - for _, replacementKey := range renameMapForTrace { - if value, ok := resourceAttributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { - t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") - } - } - - if value, ok := resourceAttributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { - t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), "i-01ef7d37f42caa168") - } -} - -func TestCopyResourceAttributesToAttributes(t *testing.T) { - logger, _ := zap.NewDevelopment() - normalizer := NewAttributesNormalizer(logger) - - // Create a pcommon.Map for resourceAttributes with some attributes - resourceAttributes := pcommon.NewMap() - for resourceAttrKey, attrKey := range copyMapForMetric { - resourceAttributes.PutStr(resourceAttrKey, attrKey+"-value") - } - resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") - - // Create a pcommon.Map for attributes - attributes := pcommon.NewMap() - - // Call the process method - normalizer.copyResourceAttributesToAttributes(attributes, resourceAttributes, false) - - // Check that the attribute has been copied correctly - for _, attrKey := range copyMapForMetric { - if value, ok := attributes.Get(attrKey); !ok || value.AsString() != attrKey+"-value" { - t.Errorf("Attribute was not copied correctly: got %v, want %v", value.AsString(), attrKey+"-value") - } - } - - if value, ok := attributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { - 
t.Errorf("Attribute was not copied correctly: got %v, want %v", value.AsString(), "i-01ef7d37f42caa168") - } -} diff --git a/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go b/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go deleted file mode 100644 index 9659844803..0000000000 --- a/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package resolver - -import ( - "context" - "errors" - - "go.opentelemetry.io/collector/pdata/pcommon" - "go.uber.org/zap" - - appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" -) - -const AttributePlatformGeneric = "Generic" - -var DefaultHostedInAttributes = map[string]string{ - attr.AWSHostedInEnvironment: attr.HostedInEnvironment, -} - -type subResolver interface { - Process(attributes, resourceAttributes pcommon.Map) error - Stop(ctx context.Context) error -} - -type attributesResolver struct { - subResolvers []subResolver -} - -// create a new attributes resolver -func NewAttributesResolver(resolvers []appsignalsconfig.Resolver, logger *zap.Logger) *attributesResolver { - //TODO: Logic for native k8s needs to be implemented - subResolvers := []subResolver{} - for _, resolver := range resolvers { - if resolver.Platform == appsignalsconfig.PlatformEKS || resolver.Platform == appsignalsconfig.PlatformK8s { - subResolvers = append(subResolvers, getKubernetesResolver(logger), newKubernetesHostedInAttributeResolver(resolver.Name)) - } else { - subResolvers = append(subResolvers, newHostedInAttributeResolver(resolver.Name, DefaultHostedInAttributes)) - } - } - return &attributesResolver{ - subResolvers: subResolvers, - } -} - -// Process the attributes -func (r *attributesResolver) 
Process(attributes, resourceAttributes pcommon.Map, _ bool) error { - for _, subResolver := range r.subResolvers { - if err := subResolver.Process(attributes, resourceAttributes); err != nil { - return err - } - } - return nil -} - -func (r *attributesResolver) Stop(ctx context.Context) error { - var errs error - for _, subResolver := range r.subResolvers { - if err := subResolver.Stop(ctx); err != nil { - errs = errors.Join(errs, err) - } - } - return errs -} - -type hostedInAttributeResolver struct { - name string - attributeMap map[string]string -} - -func newHostedInAttributeResolver(name string, attributeMap map[string]string) *hostedInAttributeResolver { - if name == "" { - name = AttributePlatformGeneric - } - return &hostedInAttributeResolver{ - name: name, - attributeMap: attributeMap, - } -} -func (h *hostedInAttributeResolver) Process(attributes, resourceAttributes pcommon.Map) error { - for attrKey, mappingKey := range h.attributeMap { - if val, ok := resourceAttributes.Get(attrKey); ok { - attributes.PutStr(mappingKey, val.AsString()) - } - } - - if _, ok := resourceAttributes.Get(attr.AWSHostedInEnvironment); !ok { - attributes.PutStr(attr.HostedInEnvironment, h.name) - } - - return nil -} - -func (h *hostedInAttributeResolver) Stop(ctx context.Context) error { - return nil -} diff --git a/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go b/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go deleted file mode 100644 index 5f5d4868aa..0000000000 --- a/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package resolver - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "go.opentelemetry.io/collector/pdata/pcommon" - - attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" -) - -type MockSubResolver struct { - mock.Mock -} - -func (m *MockSubResolver) Process(attributes, resourceAttributes pcommon.Map) error { - args := m.Called(attributes, resourceAttributes) - return args.Error(0) -} - -func (m *MockSubResolver) Stop(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - -func TestHostedInAttributeResolverWithNoConfiguredName(t *testing.T) { - resolver := newHostedInAttributeResolver("", DefaultHostedInAttributes) - - attributes := pcommon.NewMap() - resourceAttributes := pcommon.NewMap() - - resolver.Process(attributes, resourceAttributes) - envAttr, ok := attributes.Get(attr.HostedInEnvironment) - assert.True(t, ok) - assert.Equal(t, "Generic", envAttr.AsString()) -} - -func TestHostedInAttributeResolverWithConfiguredName(t *testing.T) { - resolver := newHostedInAttributeResolver("test", DefaultHostedInAttributes) - - attributes := pcommon.NewMap() - resourceAttributes := pcommon.NewMap() - - resolver.Process(attributes, resourceAttributes) - envAttr, ok := attributes.Get(attr.HostedInEnvironment) - assert.True(t, ok) - assert.Equal(t, "test", envAttr.AsString()) -} - -func TestHostedInAttributeResolverWithConflictedName(t *testing.T) { - resolver := newHostedInAttributeResolver("test", DefaultHostedInAttributes) - - attributes := pcommon.NewMap() - resourceAttributes := pcommon.NewMap() - resourceAttributes.PutStr(attr.AWSHostedInEnvironment, "self-defined") - - resolver.Process(attributes, resourceAttributes) - envAttr, ok := attributes.Get(attr.HostedInEnvironment) - assert.True(t, ok) - assert.Equal(t, "self-defined", envAttr.AsString()) -} - -func 
TestAttributesResolver_Process(t *testing.T) { - attributes := pcommon.NewMap() - resourceAttributes := pcommon.NewMap() - - mockSubResolver1 := new(MockSubResolver) - mockSubResolver1.On("Process", attributes, resourceAttributes).Return(nil) - - mockSubResolver2 := new(MockSubResolver) - mockSubResolver2.On("Process", attributes, resourceAttributes).Return(errors.New("error")) - - r := &attributesResolver{ - subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, - } - - err := r.Process(attributes, resourceAttributes, true) - assert.Error(t, err) - mockSubResolver1.AssertExpectations(t) - mockSubResolver2.AssertExpectations(t) -} - -func TestAttributesResolver_Stop(t *testing.T) { - ctx := context.Background() - - mockSubResolver1 := new(MockSubResolver) - mockSubResolver1.On("Stop", ctx).Return(nil) - - mockSubResolver2 := new(MockSubResolver) - mockSubResolver2.On("Stop", ctx).Return(errors.New("error")) - - r := &attributesResolver{ - subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, - } - - err := r.Stop(ctx) - assert.Error(t, err) - mockSubResolver1.AssertExpectations(t) - mockSubResolver2.AssertExpectations(t) -} diff --git a/plugins/processors/awsappsignals/rules/common_test.go b/plugins/processors/awsappsignals/rules/common_test.go deleted file mode 100644 index e2ab0fe9a1..0000000000 --- a/plugins/processors/awsappsignals/rules/common_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package rules - -import "go.opentelemetry.io/collector/pdata/pcommon" - -func generateTestAttributes(service string, operation string, remoteService string, remoteOperation string, - isTrace bool) pcommon.Map { - attributes := pcommon.NewMap() - if isTrace { - attributes.PutStr("aws.local.service", service) - attributes.PutStr("aws.local.operation", operation) - attributes.PutStr("aws.remote.service", remoteService) - attributes.PutStr("aws.remote.operation", remoteOperation) - } else { - attributes.PutStr("Service", service) - attributes.PutStr("Operation", operation) - attributes.PutStr("RemoteService", remoteService) - attributes.PutStr("RemoteOperation", remoteOperation) - } - return attributes -} diff --git a/plugins/processors/ec2tagger/config.go b/plugins/processors/ec2tagger/config.go index 4c4a9616d4..1b0549de6e 100644 --- a/plugins/processors/ec2tagger/config.go +++ b/plugins/processors/ec2tagger/config.go @@ -16,6 +16,11 @@ var SupportedAppendDimensions = map[string]string{ "InstanceType": "${aws:InstanceType}", } +const ( + AttributeVolumeId = "VolumeId" + ValueAppendDimensionVolumeId = "${aws:VolumeId}" +) + type Config struct { RefreshIntervalSeconds time.Duration `mapstructure:"refresh_interval_seconds"` EC2MetadataTags []string `mapstructure:"ec2_metadata_tags"` diff --git a/plugins/processors/ec2tagger/constants.go b/plugins/processors/ec2tagger/constants.go index 939792f651..782ff303cd 100644 --- a/plugins/processors/ec2tagger/constants.go +++ b/plugins/processors/ec2tagger/constants.go @@ -65,7 +65,6 @@ const ( mdKeyInstanceId = "InstanceId" mdKeyImageId = "ImageId" mdKeyInstanceType = "InstanceType" - ebsVolumeId = "EBSVolumeId" ) var ( diff --git a/plugins/processors/ec2tagger/ebsvolume.go b/plugins/processors/ec2tagger/ebsvolume.go deleted file mode 100644 index e7fae38c2d..0000000000 --- a/plugins/processors/ec2tagger/ebsvolume.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package ec2tagger - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/service/ec2" -) - -type EbsVolume struct { - // device name to volumeId mapping - dev2Vol map[string]string - sync.RWMutex -} - -func NewEbsVolume() *EbsVolume { - return &EbsVolume{dev2Vol: make(map[string]string)} -} - -func (e *EbsVolume) addEbsVolumeMapping(zone *string, attachement *ec2.VolumeAttachment) { - // *attachement.Device is sth like: /dev/xvda - devPath := findNvmeBlockNameIfPresent(*attachement.Device) - if devPath == "" { - devPath = *attachement.Device - } - - e.Lock() - defer e.Unlock() - e.dev2Vol[devPath] = fmt.Sprintf("aws://%s/%s", *zone, *attachement.VolumeId) -} - -// find nvme block name by symlink, if symlink doesn't exist, return "" -func findNvmeBlockNameIfPresent(devName string) string { - // for nvme(ssd), there is a symlink from devName to nvme block name, i.e. /dev/xvda -> /dev/nvme0n1 - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html - hasRootFs := true - if _, err := os.Lstat("/rootfs/proc"); os.IsNotExist(err) { - hasRootFs = false - } - nvmeName := "" - - if hasRootFs { - devName = "/rootfs" + devName - } - - if info, err := os.Lstat(devName); err == nil { - if info.Mode()&os.ModeSymlink != 0 { - if path, err := filepath.EvalSymlinks(devName); err == nil { - nvmeName = path - } - } - } - - if nvmeName != "" && hasRootFs { - nvmeName = strings.TrimPrefix(nvmeName, "/rootfs") - } - return nvmeName -} - -func (e *EbsVolume) getEbsVolumeId(devName string) string { - e.RLock() - defer e.RUnlock() - - for k, v := range e.dev2Vol { - // The key of dev2Vol is device name like nvme0n1, while the input devName could be a partition name like nvme0n1p1 - if strings.HasPrefix(devName, k) { - return v - } - } - - return "" -} diff --git a/plugins/processors/ec2tagger/ec2metadataprovider.go 
b/plugins/processors/ec2tagger/ec2metadataprovider.go index b90258605a..6278f69dff 100644 --- a/plugins/processors/ec2tagger/ec2metadataprovider.go +++ b/plugins/processors/ec2tagger/ec2metadataprovider.go @@ -12,7 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" ) @@ -52,7 +52,7 @@ func (c *metadataClient) InstanceID(ctx context.Context) (string, error) { log.Printf("D! could not get instance id without imds v1 fallback enable thus enable fallback") instanceInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "instance-id") if errorInner == nil { - provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) + agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } return instanceInner, errorInner } @@ -65,7 +65,7 @@ func (c *metadataClient) Hostname(ctx context.Context) (string, error) { log.Printf("D! could not get hostname without imds v1 fallback enable thus enable fallback") hostnameInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "hostname") if errorInner == nil { - provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) + agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } return hostnameInner, errorInner } @@ -78,7 +78,7 @@ func (c *metadataClient) Get(ctx context.Context) (ec2metadata.EC2InstanceIdenti log.Printf("D! 
could not get instance document without imds v1 fallback enable thus enable fallback") instanceDocumentInner, errorInner := c.metadataFallbackEnabled.GetInstanceIdentityDocumentWithContext(ctx) if errorInner == nil { - provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) + agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } return instanceDocumentInner, errorInner } diff --git a/plugins/processors/ec2tagger/ec2tagger.go b/plugins/processors/ec2tagger/ec2tagger.go index 5d49ce5cf7..d4236397d5 100644 --- a/plugins/processors/ec2tagger/ec2tagger.go +++ b/plugins/processors/ec2tagger/ec2tagger.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger/internal/volume" translatorCtx "github.com/aws/amazon-cloudwatch-agent/translator/context" ) @@ -52,14 +53,13 @@ type Tagger struct { ec2MetadataRespond ec2MetadataRespondType tagFilters []*ec2.Filter ec2API ec2iface.EC2API - ebsVolume *EbsVolume + volumeSerialCache volume.Cache sync.RWMutex //to protect ec2TagCache } // newTagger returns a new EC2 Tagger processor. 
func newTagger(config *Config, logger *zap.Logger) *Tagger { - _, cancel := context.WithCancel(context.Background()) mdCredentialConfig := &configaws.CredentialConfig{} @@ -77,7 +77,6 @@ func newTagger(config *Config, logger *zap.Logger) *Tagger { }) }, } - return p } @@ -146,11 +145,11 @@ func (t *Tagger) updateOtelAttributes(attributes []pcommon.Map) { if t.ec2MetadataLookup.instanceType { attr.PutStr(mdKeyInstanceType, t.ec2MetadataRespond.instanceType) } - if t.ebsVolume != nil { + if t.volumeSerialCache != nil { if devName, found := attr.Get(t.DiskDeviceTagKey); found { - ebsVolId := t.ebsVolume.getEbsVolumeId(devName.Str()) - if ebsVolId != "" { - attr.PutStr(ebsVolumeId, ebsVolId) + serial := t.volumeSerialCache.Serial(devName.Str()) + if serial != "" { + attr.PutStr(AttributeVolumeId, serial) } } } @@ -270,7 +269,7 @@ func (t *Tagger) ebsVolumesRetrieved() bool { if key == "*" { continue } - if volId := t.ebsVolume.getEbsVolumeId(key); volId == "" { + if volId := t.volumeSerialCache.Serial(key); volId == "" { allVolumesRetrieved = false break } @@ -280,7 +279,7 @@ func (t *Tagger) ebsVolumesRetrieved() bool { // Start acts as input validation and serves the purpose of updating ec2 tags and ebs volumes if necessary. 
// It will be called when OTel is enabling each processor -func (t *Tagger) Start(ctx context.Context, host component.Host) error { +func (t *Tagger) Start(ctx context.Context, _ component.Host) error { t.shutdownC = make(chan bool) t.ec2TagCache = map[string]string{} @@ -373,34 +372,15 @@ func (t *Tagger) refreshLoopToUpdateTagsAndVolumes() { // updateVolumes calls EC2 describe volume func (t *Tagger) updateVolumes() error { - if t.ebsVolume == nil { - t.ebsVolume = NewEbsVolume() + if t.volumeSerialCache == nil { + t.volumeSerialCache = volume.NewCache(volume.NewProvider(t.ec2API, t.ec2MetadataRespond.instanceId)) } - input := &ec2.DescribeVolumesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("attachment.instance-id"), - Values: aws.StringSlice([]string{t.ec2MetadataRespond.instanceId}), - }, - }, + if err := t.volumeSerialCache.Refresh(); err != nil { + return err } - for { - result, err := t.ec2API.DescribeVolumes(input) - if err != nil { - return err - } - for _, volume := range result.Volumes { - for _, attachment := range volume.Attachments { - t.ebsVolume.addEbsVolumeMapping(volume.AvailabilityZone, attachment) - } - } - if result.NextToken == nil { - break - } - input.SetNextToken(*result.NextToken) - } + t.logger.Debug("Volume Serial Cache", zap.Strings("devices", t.volumeSerialCache.Devices())) return nil } diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index c9af7c2f1a..0f3d3d84c8 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -6,6 +6,7 @@ package ec2tagger import ( "context" "errors" + "sync" "testing" "time" @@ -18,6 +19,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor/processortest" + "golang.org/x/exp/maps" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" ) @@ -34,17 +36,6 @@ type mockEC2Client struct { 
tagsFailLimit int tagsPartialLimit int UseUpdatedTags bool - - //The following fields are used to control how the mocked DescribeVolumes api behave: - //volumesCallCount records how many times DescribeVolumes has been called - //if volumesCallCount <= volumesFailLimit, DescribeVolumes call fails - //if volumesFailLimit < tagsCallCount <= volumesPartialLimit, DescribeVolumes returns partial volumes - //if volumesCallCount > volumesPartialLimit, DescribeVolumes returns all volumes - //DescribeVolumes returns update volumes if UseUpdatedVolumes is true - volumesCallCount int - volumesFailLimit int - volumesPartialLimit int - UseUpdatedVolumes bool } // construct the return results for the mocked DescribeTags api @@ -96,7 +87,7 @@ func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsO //when tags are not ready or customer doesn't have permission to call the api if m.tagsCallCount <= m.tagsFailLimit { m.tagsCallCount++ - return nil, errors.New("No tags available now") + return nil, errors.New("no tags available now") } //return partial tags to simulate the case @@ -121,85 +112,19 @@ func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsO // construct the return results for the mocked DescribeTags api var ( - device1 = "/dev/xvdc" - volumeId1 = "vol-0303a1cc896c42d28" - volumeAttachmentId1 = "aws://us-east-1a/vol-0303a1cc896c42d28" - volumeAttachment1 = ec2.VolumeAttachment{Device: &device1, VolumeId: &volumeId1} - availabilityZone = "us-east-1a" - volume1 = ec2.Volume{ - Attachments: []*ec2.VolumeAttachment{&volumeAttachment1}, - AvailabilityZone: &availabilityZone, - } + device1 = "xvdc" + volumeId1 = "vol-0303a1cc896c42d28" ) var ( - device2 = "/dev/xvdf" - volumeId2 = "vol-0c241693efb58734a" - volumeAttachmentId2 = "aws://us-east-1a/vol-0c241693efb58734a" - volumeAttachment2 = ec2.VolumeAttachment{Device: &device2, VolumeId: &volumeId2} - volume2 = ec2.Volume{ - Attachments: 
[]*ec2.VolumeAttachment{&volumeAttachment2}, - AvailabilityZone: &availabilityZone, - } + device2 = "xvdf" + volumeId2 = "vol-0c241693efb58734a" ) var ( - volumeId2Updated = "vol-0459607897eaa8148" - volumeAttachmentUpdatedId2 = "aws://us-east-1a/vol-0459607897eaa8148" - volumeAttachment2Updated = ec2.VolumeAttachment{Device: &device2, VolumeId: &volumeId2Updated} - volume2Updated = ec2.Volume{ - Attachments: []*ec2.VolumeAttachment{&volumeAttachment2Updated}, - AvailabilityZone: &availabilityZone, - } + volumeId2Updated = "vol-0459607897eaa8148" ) -func (m *mockEC2Client) DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) { - //volume1 is the initial disk assigned to an ec2 instance when started - partialVolumes := ec2.DescribeVolumesOutput{ - NextToken: nil, - Volumes: []*ec2.Volume{&volume1}, - } - - //later customer attached volume2 to the running ec2 instance - //but this volume might not be known to the api immediately - allVolumes := ec2.DescribeVolumesOutput{ - NextToken: nil, - Volumes: []*ec2.Volume{&volume1, &volume2}, - } - - //later customer updates by attaching a different ebs volume to the same device name - allVolumesUpdated := ec2.DescribeVolumesOutput{ - NextToken: nil, - Volumes: []*ec2.Volume{&volume1, &volume2Updated}, - } - - //return error initially to simulate the case - //when the volumes are not ready or customer doesn't have permission to call the api - if m.volumesCallCount <= m.volumesFailLimit { - m.volumesCallCount++ - return nil, errors.New("No volumes available now") - } - - //return partial volumes to simulate the case - //when the api knows about some but not all volumes at early stage - if m.volumesCallCount <= m.volumesPartialLimit { - m.volumesCallCount++ - return &partialVolumes, nil - } - - //return all volumes to simulate the case - //when the api knows about all volumes at later stage - if m.volumesCallCount > m.volumesPartialLimit { - m.volumesCallCount++ - //return updated result after customer 
edits volumes - if m.UseUpdatedVolumes { - return &allVolumesUpdated, nil - } - return &allVolumes, nil - } - return nil, nil -} - type mockMetadataProvider struct { InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument } @@ -226,6 +151,42 @@ var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ ImageID: "ami-09edd32d9b0990d49", } +type mockVolumeCache struct { + sync.RWMutex + cache map[string]string + refreshCount int + volumesPartialLimit int + UseUpdatedVolumes bool +} + +func (m *mockVolumeCache) Refresh() error { + m.Lock() + defer m.Unlock() + if m.refreshCount <= m.volumesPartialLimit { + m.cache[device1] = volumeId1 + } else { + if m.UseUpdatedVolumes { + m.cache[device2] = volumeId2Updated + } else { + m.cache[device2] = volumeId2 + } + } + m.refreshCount++ + return nil +} + +func (m *mockVolumeCache) Serial(devName string) string { + m.RLock() + defer m.RUnlock() + return m.cache[devName] +} + +func (m *mockVolumeCache) Devices() []string { + m.RLock() + defer m.RUnlock() + return maps.Keys(m.cache) +} + // createTestMetrics create new pmetric.Metrics pm that satisfies: // // pm.ResourceMetrics().Len() == 1 @@ -294,10 +255,11 @@ func TestStartFailWithNoMetadata(t *testing.T) { cfg := createDefaultConfig().(*Config) _, cancel := context.WithCancel(context.Background()) tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, + volumeSerialCache: &mockVolumeCache{cache: make(map[string]string)}, } err := tagger.Start(context.Background(), componenttest.NewNopHost()) @@ -314,39 +276,38 @@ func TestStartSuccessWithNoTagsVolumesUpdate(t *testing.T) { cfg.EBSDeviceKeys = []string{device1, device2} _, cancel := 
context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 0, - tagsPartialLimit: 1, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 0, + tagsPartialLimit: 1, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } + volumeCache := &mockVolumeCache{cache: make(map[string]string)} backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: volumeCache, } err := tagger.Start(context.Background(), componenttest.NewNopHost()) assert.Nil(t, err) //assume one second is long enough for the api to be called many times so that all tags/volumes are retrieved time.Sleep(time.Second) assert.Equal(t, 3, ec2Client.tagsCallCount) - assert.Equal(t, 2, ec2Client.volumesCallCount) + assert.Equal(t, 2, volumeCache.refreshCount) //check tags and volumes expectedTags := map[string]string{tagKey1: tagVal1, tagKey2: tagVal2, "AutoScalingGroupName": tagVal3} assert.Equal(t, expectedTags, tagger.ec2TagCache) - expectedVolumes := map[string]string{device1: volumeAttachmentId1, device2: volumeAttachmentId2} - assert.Equal(t, expectedVolumes, tagger.ebsVolume.dev2Vol) + assert.Len(t, tagger.volumeSerialCache.Devices(), 2) + assert.Equal(t, volumeId1, tagger.volumeSerialCache.Serial(device1)) + assert.Equal(t, volumeId2, 
tagger.volumeSerialCache.Serial(device2)) } // run Start() and check all tags/volumes are retrieved and saved and then updated @@ -359,27 +320,25 @@ func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { cfg.EBSDeviceKeys = []string{device1, device2} _, cancel := context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 1, - tagsPartialLimit: 2, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 1, + tagsPartialLimit: 2, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } + volumeCache := &mockVolumeCache{cache: make(map[string]string)} backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 10 * time.Millisecond tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: volumeCache, } err := tagger.Start(context.Background(), componenttest.NewNopHost()) @@ -390,19 +349,21 @@ func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { //check tags and volumes expectedTags := map[string]string{tagKey1: tagVal1, tagKey2: tagVal2, "AutoScalingGroupName": tagVal3} assert.Equal(t, expectedTags, tagger.ec2TagCache) - expectedVolumes := map[string]string{device1: volumeAttachmentId1, device2: volumeAttachmentId2} - assert.Equal(t, expectedVolumes, tagger.ebsVolume.dev2Vol) + assert.Len(t, tagger.volumeSerialCache.Devices(), 2) + assert.Equal(t, volumeId1, 
tagger.volumeSerialCache.Serial(device1)) + assert.Equal(t, volumeId2, tagger.volumeSerialCache.Serial(device2)) //update the tags and volumes ec2Client.UseUpdatedTags = true - ec2Client.UseUpdatedVolumes = true + volumeCache.UseUpdatedVolumes = true //assume one second is long enough for the api to be called many times //so that all tags/volumes are updated time.Sleep(time.Second) expectedTags = map[string]string{tagKey1: tagVal1, tagKey2: updatedTagVal2, "AutoScalingGroupName": tagVal3} assert.Equal(t, expectedTags, tagger.ec2TagCache) - expectedVolumes = map[string]string{device1: volumeAttachmentId1, device2: volumeAttachmentUpdatedId2} - assert.Equal(t, expectedVolumes, tagger.ebsVolume.dev2Vol) + assert.Len(t, tagger.volumeSerialCache.Devices(), 2) + assert.Equal(t, volumeId1, tagger.volumeSerialCache.Serial(device1)) + assert.Equal(t, volumeId2Updated, tagger.volumeSerialCache.Serial(device2)) } // run Start() with ec2_instance_tag_keys = ["*"] and ebs_device_keys = ["*"] @@ -415,26 +376,24 @@ func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { cfg.EBSDeviceKeys = []string{"*"} _, cancel := context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 0, - tagsPartialLimit: 1, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 0, + tagsPartialLimit: 1, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } + volumeCache := &mockVolumeCache{cache: make(map[string]string)} backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: 
ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: volumeCache, } err := tagger.Start(context.Background(), componenttest.NewNopHost()) @@ -443,12 +402,12 @@ func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { time.Sleep(time.Second) //check only partial tags/volumes are returned assert.Equal(t, 2, ec2Client.tagsCallCount) - assert.Equal(t, 1, ec2Client.volumesCallCount) + assert.Equal(t, 1, volumeCache.refreshCount) //check partial tags/volumes are saved expectedTags := map[string]string{tagKey1: tagVal1} assert.Equal(t, expectedTags, tagger.ec2TagCache) - expectedVolumes := map[string]string{device1: volumeAttachmentId1} - assert.Equal(t, expectedVolumes, tagger.ebsVolume.dev2Vol) + assert.Len(t, tagger.volumeSerialCache.Devices(), 1) + assert.Equal(t, volumeId1, tagger.volumeSerialCache.Serial(device1)) } // run Start() and then processMetrics and check the output metrics contain expected tags @@ -462,26 +421,24 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { cfg.DiskDeviceTagKey = "device" _, cancel := context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 0, - tagsPartialLimit: 1, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 0, + tagsPartialLimit: 1, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } + volumeCache := &mockVolumeCache{cache: make(map[string]string)} backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: 
cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: volumeCache, } err := tagger.Start(context.Background(), componenttest.NewNopHost()) assert.Nil(t, err) @@ -509,7 +466,7 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { }, map[string]string{ "AutoScalingGroupName": tagVal3, - "EBSVolumeId": volumeAttachmentId2, + "VolumeId": volumeId2, "InstanceId": "i-01d2417c27a396e44", "InstanceType": "m5ad.large", tagKey1: tagVal1, @@ -521,7 +478,7 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { //update tags and volumes and check metrics are updated as well ec2Client.UseUpdatedTags = true - ec2Client.UseUpdatedVolumes = true + volumeCache.UseUpdatedVolumes = true //assume one second is long enough for the api to be called many times //so that all tags/volumes are updated time.Sleep(time.Second) @@ -537,7 +494,7 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { }, map[string]string{ "AutoScalingGroupName": tagVal3, - "EBSVolumeId": volumeAttachmentUpdatedId2, + "VolumeId": volumeId2Updated, "InstanceId": "i-01d2417c27a396e44", "InstanceType": "m5ad.large", tagKey1: tagVal1, @@ -557,26 +514,24 @@ func TestMetricsDroppedBeforeStarted(t *testing.T) { cfg.EBSDeviceKeys = []string{"*"} _, cancel := context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 0, - tagsPartialLimit: 1, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 0, + tagsPartialLimit: 1, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } + volumeCache := 
&mockVolumeCache{cache: make(map[string]string)} backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: volumeCache, } md := createTestMetrics([]map[string]string{ @@ -602,13 +557,12 @@ func TestMetricsDroppedBeforeStarted(t *testing.T) { time.Sleep(time.Second) //check only partial tags/volumes are returned assert.Equal(t, 2, ec2Client.tagsCallCount) - assert.Equal(t, 1, ec2Client.volumesCallCount) //check partial tags/volumes are saved expectedTags := map[string]string{tagKey1: tagVal1} assert.Equal(t, expectedTags, tagger.ec2TagCache) - expectedVolumes := map[string]string{device1: volumeAttachmentId1} - assert.Equal(t, expectedVolumes, tagger.ebsVolume.dev2Vol) + assert.Len(t, tagger.volumeSerialCache.Devices(), 1) + assert.Equal(t, volumeId1, tagger.volumeSerialCache.Serial(device1)) assert.Equal(t, tagger.started, true) output, err = tagger.processMetrics(context.Background(), md) @@ -625,14 +579,10 @@ func TestTaggerStartDoesNotBlock(t *testing.T) { cfg.EBSDeviceKeys = []string{"*"} _, cancel := context.WithCancel(context.Background()) ec2Client := &mockEC2Client{ - tagsCallCount: 0, - tagsFailLimit: 0, - tagsPartialLimit: 1, - UseUpdatedTags: false, - volumesCallCount: 0, - volumesFailLimit: -1, - volumesPartialLimit: 0, - UseUpdatedVolumes: false, + tagsCallCount: 0, + tagsFailLimit: 0, + tagsPartialLimit: 1, + UseUpdatedTags: false, } ec2Provider := func(*configaws.CredentialConfig) 
ec2iface.EC2API { return ec2Client @@ -640,11 +590,12 @@ func TestTaggerStartDoesNotBlock(t *testing.T) { backoffSleepArray = []time.Duration{1 * time.Minute, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} defaultRefreshInterval = 180 * time.Second tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, - ec2Provider: ec2Provider, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + ec2Provider: ec2Provider, + volumeSerialCache: &mockVolumeCache{cache: make(map[string]string)}, } deadline := time.NewTimer(1 * time.Second) @@ -670,10 +621,11 @@ func TestTaggerStartsWithoutTagOrVolume(t *testing.T) { _, cancel := context.WithCancel(context.Background()) tagger := &Tagger{ - Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, - cancelFunc: cancel, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + Config: cfg, + logger: processortest.NewNopCreateSettings().Logger, + cancelFunc: cancel, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + volumeSerialCache: &mockVolumeCache{cache: make(map[string]string)}, } deadline := time.NewTimer(1 * time.Second) diff --git a/plugins/processors/ec2tagger/factory.go b/plugins/processors/ec2tagger/factory.go index 824129f1f9..72b5576b9d 100644 --- a/plugins/processors/ec2tagger/factory.go +++ b/plugins/processors/ec2tagger/factory.go @@ -14,11 +14,13 @@ import ( ) const ( - TypeStr = "ec2tagger" stability = component.StabilityLevelStable ) -var processorCapabilities = consumer.Capabilities{MutatesData: true} +var ( + TypeStr, _ = component.NewType("ec2tagger") + processorCapabilities = 
consumer.Capabilities{MutatesData: true} +) func createDefaultConfig() component.Config { return &Config{} diff --git a/plugins/processors/ec2tagger/internal/volume/describevolumes.go b/plugins/processors/ec2tagger/internal/volume/describevolumes.go new file mode 100644 index 0000000000..11108c996b --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/describevolumes.go @@ -0,0 +1,51 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package volume + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" +) + +type describeVolumesProvider struct { + ec2Client ec2iface.EC2API + instanceID string +} + +func newDescribeVolumesProvider(ec2Client ec2iface.EC2API, instanceID string) Provider { + return &describeVolumesProvider{ec2Client: ec2Client, instanceID: instanceID} +} + +func (p *describeVolumesProvider) DeviceToSerialMap() (map[string]string, error) { + result := map[string]string{} + input := &ec2.DescribeVolumesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("attachment.instance-id"), + Values: aws.StringSlice([]string{p.instanceID}), + }, + }, + } + for { + output, err := p.ec2Client.DescribeVolumes(input) + if err != nil { + return nil, fmt.Errorf("unable to describe volumes: %w", err) + } + for _, volume := range output.Volumes { + for _, attachment := range volume.Attachments { + if attachment.Device != nil && attachment.VolumeId != nil { + result[aws.StringValue(attachment.Device)] = aws.StringValue(attachment.VolumeId) + } + } + } + if output.NextToken == nil { + break + } + input.SetNextToken(*output.NextToken) + } + return result, nil +} diff --git a/plugins/processors/ec2tagger/internal/volume/describevolumes_test.go b/plugins/processors/ec2tagger/internal/volume/describevolumes_test.go new file mode 100644 index 0000000000..d83452f1a7 --- /dev/null +++ 
b/plugins/processors/ec2tagger/internal/volume/describevolumes_test.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package volume + +import ( + "errors" + "testing" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/stretchr/testify/assert" +) + +// construct the return results for the mocked DescribeTags api +var ( + device1 = "/dev/xvdc" + volumeId1 = "vol-0303a1cc896c42d28" + volumeAttachment1 = ec2.VolumeAttachment{Device: &device1, VolumeId: &volumeId1} + availabilityZone = "us-east-1a" + volume1 = ec2.Volume{ + Attachments: []*ec2.VolumeAttachment{&volumeAttachment1}, + AvailabilityZone: &availabilityZone, + } +) + +var ( + device2 = "/dev/xvdf" + volumeId2 = "vol-0c241693efb58734a" + volumeAttachment2 = ec2.VolumeAttachment{Device: &device2, VolumeId: &volumeId2} + volume2 = ec2.Volume{ + Attachments: []*ec2.VolumeAttachment{&volumeAttachment2}, + AvailabilityZone: &availabilityZone, + } +) + +type mockEC2Client struct { + ec2iface.EC2API + + callCount int + err error +} + +func (m *mockEC2Client) DescribeVolumes(input *ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) { + m.callCount++ + + if m.err != nil { + return nil, m.err + } + + if input.NextToken == nil { + return &ec2.DescribeVolumesOutput{ + NextToken: &device2, + Volumes: []*ec2.Volume{&volume1}, + }, nil + } + return &ec2.DescribeVolumesOutput{ + NextToken: nil, + Volumes: []*ec2.Volume{&volume2}, + }, nil +} + +func TestDescribeVolumesProvider(t *testing.T) { + ec2Client := &mockEC2Client{} + p := newDescribeVolumesProvider(ec2Client, "") + got, err := p.DeviceToSerialMap() + assert.NoError(t, err) + assert.Equal(t, 2, ec2Client.callCount) + want := map[string]string{device1: volumeId1, device2: volumeId2} + assert.Equal(t, want, got) + ec2Client.err = errors.New("test") + ec2Client.callCount = 0 + got, err = p.DeviceToSerialMap() + assert.Error(t, 
err) + assert.Equal(t, 1, ec2Client.callCount) + assert.Nil(t, got) +} diff --git a/plugins/processors/ec2tagger/internal/volume/host_linux.go b/plugins/processors/ec2tagger/internal/volume/host_linux.go new file mode 100644 index 0000000000..170fa56acd --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/host_linux.go @@ -0,0 +1,73 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build linux + +package volume + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "strings" +) + +const ( + ebsSerialPrefix = "vol" + ebsSerialSeparator = "-" + + sysBlockPath = "/sys/block/" + serialFile = "device/serial" + + loopDevicePrefix = "loop" +) + +type hostProvider struct { + osReadDir func(string) ([]os.DirEntry, error) + osReadFile func(string) ([]byte, error) +} + +func newHostProvider() Provider { + return &hostProvider{ + osReadDir: os.ReadDir, + osReadFile: os.ReadFile, + } +} + +func (p *hostProvider) DeviceToSerialMap() (map[string]string, error) { + result := map[string]string{} + dirs, err := p.osReadDir(sysBlockPath) + if err != nil { + return nil, fmt.Errorf("unable to read %s: %w", sysBlockPath, err) + } + for _, dir := range dirs { + deviceName := dir.Name() + // skip loop devices + if strings.HasPrefix(deviceName, loopDevicePrefix) { + continue + } + serial, _ := p.osReadFile(serialFilePath(deviceName)) + serial = bytes.TrimSpace(serial) + if len(serial) > 0 { + result[deviceName] = formatSerial(string(serial)) + } + } + if len(result) == 0 { + return nil, errors.New("no devices/serials found") + } + return result, nil +} + +func formatSerial(serial string) string { + suffix, ok := strings.CutPrefix(serial, ebsSerialPrefix) + if !ok || strings.HasPrefix(suffix, ebsSerialSeparator) { + return serial + } + return ebsSerialPrefix + ebsSerialSeparator + suffix +} + +func serialFilePath(deviceName string) string { + return filepath.Join(sysBlockPath, deviceName, serialFile) 
+} diff --git a/plugins/processors/ec2tagger/internal/volume/host_linux_test.go b/plugins/processors/ec2tagger/internal/volume/host_linux_test.go new file mode 100644 index 0000000000..0e6b34e589 --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/host_linux_test.go @@ -0,0 +1,81 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build linux + +package volume + +import ( + "errors" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + testDirEntries = []os.DirEntry{ + &mockDirEntry{name: "xvdc"}, + &mockDirEntry{name: "loop1"}, + &mockDirEntry{name: "xvdc1"}, + &mockDirEntry{name: "xvdf"}, + &mockDirEntry{name: "loop2"}, + &mockDirEntry{name: "xvdh"}, + } + testSerialMap = map[string]string{ + serialFilePath("xvdc"): "vol-0303a1cc896c42d28", + serialFilePath("xvdf"): "vol0c241693efb58734a", + serialFilePath("xvdh"): "otherserial", + serialFilePath("loop1"): "skip", + } +) + +type mockDirEntry struct { + os.DirEntry + name string +} + +func (m *mockDirEntry) Name() string { + return m.name +} + +type mockFileSystem struct { + serialMap map[string]string + errDir error +} + +func (m *mockFileSystem) ReadDir(string) ([]os.DirEntry, error) { + if m.errDir != nil { + return nil, m.errDir + } + return testDirEntries, nil +} + +func (m *mockFileSystem) ReadFile(path string) ([]byte, error) { + return []byte(m.serialMap[path]), nil +} + +func TestHostProvider(t *testing.T) { + testErr := errors.New("test") + m := &mockFileSystem{ + errDir: testErr, + } + p := newHostProvider().(*hostProvider) + p.osReadDir = m.ReadDir + p.osReadFile = m.ReadFile + got, err := p.DeviceToSerialMap() + assert.Error(t, err) + assert.Nil(t, got) + m.errDir = nil + got, err = p.DeviceToSerialMap() + assert.Error(t, err) + assert.Nil(t, got) + m.serialMap = testSerialMap + got, err = p.DeviceToSerialMap() + assert.NoError(t, err) + assert.Equal(t, map[string]string{ + "xvdc": "vol-0303a1cc896c42d28", 
+ "xvdf": "vol-0c241693efb58734a", + "xvdh": "otherserial", + }, got) +} diff --git a/plugins/processors/ec2tagger/internal/volume/host_nonlinux.go b/plugins/processors/ec2tagger/internal/volume/host_nonlinux.go new file mode 100644 index 0000000000..24c00ba2e2 --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/host_nonlinux.go @@ -0,0 +1,21 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build !linux + +package volume + +import ( + "errors" +) + +type hostProvider struct { +} + +func newHostProvider() Provider { + return &hostProvider{} +} + +func (*hostProvider) DeviceToSerialMap() (map[string]string, error) { + return nil, errors.New("local block device retrieval only supported on linux") +} diff --git a/plugins/processors/ec2tagger/internal/volume/host_nonlinux_test.go b/plugins/processors/ec2tagger/internal/volume/host_nonlinux_test.go new file mode 100644 index 0000000000..43c1df013c --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/host_nonlinux_test.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +//go:build !linux + +package volume + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHostProvider(t *testing.T) { + p := newHostProvider() + got, err := p.DeviceToSerialMap() + assert.Error(t, err) + assert.Nil(t, got) +} diff --git a/plugins/processors/ec2tagger/internal/volume/merge.go b/plugins/processors/ec2tagger/internal/volume/merge.go new file mode 100644 index 0000000000..ccce0cecdd --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/merge.go @@ -0,0 +1,34 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package volume + +import ( + "errors" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" +) + +type mergeProvider struct { + providers []Provider +} + +func newMergeProvider(providers []Provider) Provider { + return &mergeProvider{providers: providers} +} + +func (p *mergeProvider) DeviceToSerialMap() (map[string]string, error) { + var errs error + results := make([]map[string]string, 0, len(p.providers)) + for _, provider := range p.providers { + if result, err := provider.DeviceToSerialMap(); err != nil { + errs = errors.Join(errs, err) + } else { + results = append(results, result) + } + } + if len(results) == 0 { + return nil, errs + } + return collections.MergeMaps(results...), nil +} diff --git a/plugins/processors/ec2tagger/internal/volume/merge_test.go b/plugins/processors/ec2tagger/internal/volume/merge_test.go new file mode 100644 index 0000000000..a6e09054d2 --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/merge_test.go @@ -0,0 +1,74 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package volume + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockProvider struct { + serialMap map[string]string + err error +} + +func (m *mockProvider) DeviceToSerialMap() (map[string]string, error) { + return m.serialMap, m.err +} + +func TestMergeProvider(t *testing.T) { + errFirstTest := errors.New("skip first") + errSecondTest := errors.New("skip second") + testCases := map[string]struct { + providers []Provider + wantSerialMap map[string]string + wantErr error + }{ + "WithErrors": { + providers: []Provider{ + &mockProvider{err: errFirstTest}, + &mockProvider{err: errSecondTest}, + }, + wantErr: errSecondTest, + }, + "WithPartialError": { + providers: []Provider{ + &mockProvider{err: errFirstTest}, + &mockProvider{serialMap: map[string]string{ + "key": "value", + }}, + }, + wantSerialMap: map[string]string{ + "key": "value", + }, + }, + "WithMerge": { + providers: []Provider{ + &mockProvider{serialMap: map[string]string{ + "foo": "bar", + "key": "first", + }}, + &mockProvider{serialMap: map[string]string{ + "key": "second", + "hello": "world", + }}, + }, + wantSerialMap: map[string]string{ + "foo": "bar", + "key": "second", + "hello": "world", + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + p := newMergeProvider(testCase.providers) + got, err := p.DeviceToSerialMap() + assert.ErrorIs(t, err, testCase.wantErr) + assert.Equal(t, testCase.wantSerialMap, got) + }) + } +} diff --git a/plugins/processors/ec2tagger/internal/volume/volume.go b/plugins/processors/ec2tagger/internal/volume/volume.go new file mode 100644 index 0000000000..b19f3df81d --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/volume.go @@ -0,0 +1,145 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package volume + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "golang.org/x/exp/maps" +) + +var ( + errNoProviders = errors.New("no available volume info providers") +) + +type Provider interface { + // DeviceToSerialMap provides a map with device name keys and serial number values. + DeviceToSerialMap() (map[string]string, error) +} + +func NewProvider(ec2Client ec2iface.EC2API, instanceID string) Provider { + return newMergeProvider([]Provider{ + newHostProvider(), + newDescribeVolumesProvider(ec2Client, instanceID), + }) +} + +type Cache interface { + Refresh() error + Serial(devName string) string + Devices() []string +} + +type cache struct { + sync.RWMutex + // device name to serial mapping + cache map[string]string + provider Provider + fetchBlockName func(string) string +} + +func NewCache(provider Provider) Cache { + return &cache{ + cache: make(map[string]string), + provider: provider, + fetchBlockName: findNvmeBlockNameIfPresent, + } +} + +func (c *cache) add(devName, serial string) { + normalizedName := c.normalizeName(devName) + + c.Lock() + defer c.Unlock() + c.cache[normalizedName] = serial +} + +func (c *cache) reset() { + c.Lock() + defer c.Unlock() + maps.Clear(c.cache) +} + +func (c *cache) Refresh() error { + if c.provider == nil { + return errNoProviders + } + result, err := c.provider.DeviceToSerialMap() + if err != nil { + return fmt.Errorf("unable to refresh volume cache: %w", err) + } + c.reset() + for deviceName, serial := range result { + c.add(deviceName, serial) + } + return nil +} + +func (c *cache) Serial(devName string) string { + c.RLock() + defer c.RUnlock() + + // check exact match first + if v, ok := c.cache[devName]; ok && v != "" { + return v + } + + for k, v := range c.cache { + // The key of cache is device name like nvme0n1, while the input devName could be a partition name like nvme0n1p1 + if 
strings.HasPrefix(devName, k) { + return v + } + } + return "" +} + +func (c *cache) Devices() []string { + c.RLock() + defer c.RUnlock() + return maps.Keys(c.cache) +} + +func (c *cache) normalizeName(devName string) string { + normalized := c.fetchBlockName(devName) + if normalized == "" { + normalized = devName + } + + // to match the disk device tag + return strings.ReplaceAll(normalized, "/dev/", "") +} + +// find nvme block name by symlink, if symlink doesn't exist, return "" +func findNvmeBlockNameIfPresent(devName string) string { + // for nvme(ssd), there is a symlink from devName to nvme block name, i.e. /dev/xvda -> /dev/nvme0n1 + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html + hasRootFs := true + if _, err := os.Lstat("/rootfs/proc"); os.IsNotExist(err) { + hasRootFs = false + } + nvmeName := "" + + if hasRootFs { + devName = "/rootfs" + devName + } + + if info, err := os.Lstat(devName); err == nil { + if info.Mode()&os.ModeSymlink != 0 { + if path, err := filepath.EvalSymlinks(devName); err == nil { + nvmeName = path + } + } + } + + if nvmeName != "" && hasRootFs { + nvmeName = strings.TrimPrefix(nvmeName, "/rootfs") + } + return nvmeName +} diff --git a/plugins/processors/ec2tagger/internal/volume/volume_test.go b/plugins/processors/ec2tagger/internal/volume/volume_test.go new file mode 100644 index 0000000000..f08844582f --- /dev/null +++ b/plugins/processors/ec2tagger/internal/volume/volume_test.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package volume + +import ( + "errors" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProvider(t *testing.T) { + p := NewProvider(nil, "") + mp, ok := p.(*mergeProvider) + assert.True(t, ok) + assert.Len(t, mp.providers, 2) + _, ok = mp.providers[0].(*hostProvider) + assert.True(t, ok) + _, ok = mp.providers[1].(*describeVolumesProvider) + assert.True(t, ok) +} + +func TestCache(t *testing.T) { + testErr := errors.New("test") + p := &mockProvider{ + serialMap: map[string]string{ + "/dev/xvdf": "foo", + "xvdc": "bar", + "xvdc1": "baz", + }, + err: testErr, + } + c := NewCache(nil).(*cache) + c.fetchBlockName = func(s string) string { + return "" + } + assert.ErrorIs(t, c.Refresh(), errNoProviders) + c.provider = p + assert.ErrorIs(t, c.Refresh(), testErr) + p.err = nil + assert.NoError(t, c.Refresh()) + assert.Equal(t, "foo", c.Serial("xvdf")) + assert.Equal(t, "bar", c.Serial("xvdc")) + assert.Equal(t, "baz", c.Serial("xvdc1")) + assert.Equal(t, "bar", c.Serial("xvdc2")) + assert.Equal(t, "", c.Serial("xvde")) + got := c.Devices() + sort.Strings(got) + assert.Equal(t, []string{"xvdc", "xvdc1", "xvdf"}, got) +} diff --git a/plugins/processors/ecsdecorator/ecsdecorator_test.go b/plugins/processors/ecsdecorator/ecsdecorator_test.go index f68be5e8e4..203c49933a 100644 --- a/plugins/processors/ecsdecorator/ecsdecorator_test.go +++ b/plugins/processors/ecsdecorator/ecsdecorator_test.go @@ -15,7 +15,7 @@ import ( ) func TestTagMetricSourceForTypeInstance(t *testing.T) { - tags := map[string]string{MetricType: TypeInstance, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstance, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, 
MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -29,7 +29,7 @@ func TestTagMetricSourceForTypeInstance(t *testing.T) { } func TestTagMetricSourceForTypeInstanceFS(t *testing.T) { - tags := map[string]string{MetricType: TypeInstanceFS, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstanceFS, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -43,7 +43,7 @@ func TestTagMetricSourceForTypeInstanceFS(t *testing.T) { } func TestTagMetricSourceForTypeInstanceNet(t *testing.T) { - tags := map[string]string{MetricType: TypeInstanceNet, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstanceNet, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -57,7 +57,7 @@ func 
TestTagMetricSourceForTypeInstanceNet(t *testing.T) { } func TestTagMetricSourceForTypeInstanceDiskIO(t *testing.T) { - tags := map[string]string{MetricType: TypeInstanceDiskIO, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstanceDiskIO, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -71,7 +71,7 @@ func TestTagMetricSourceForTypeInstanceDiskIO(t *testing.T) { } func TestTagLogGroup(t *testing.T) { - tags := map[string]string{MetricType: TypeInstance, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstance, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, diff --git a/plugins/processors/ecsdecorator/metricRule.go b/plugins/processors/ecsdecorator/metricRule.go index bbaa0e72e4..1cc2a08958 100644 --- a/plugins/processors/ecsdecorator/metricRule.go +++ b/plugins/processors/ecsdecorator/metricRule.go @@ -24,7 +24,7 @@ var nodeMetricRules = []structuredlogscommon.MetricRule{ {Unit: Percent, Name: 
MetricName(TypeInstance, MemReservedCapacity)}, {Unit: BytesPerSec, Name: MetricName(TypeInstance, NetTotalBytes)}, {Unit: Count, Name: MetricName(TypeInstance, RunningTaskCount)}}, - DimensionSets: [][]string{{ContainerInstanceIdKey, InstanceId, ClusterNameKey}}, + DimensionSets: [][]string{{ContainerInstanceIdKey, InstanceIdKey, ClusterNameKey}}, Namespace: cloudwatchNamespace, }, { @@ -48,7 +48,7 @@ var nodeFSMetricRules = []structuredlogscommon.MetricRule{ { Metrics: []structuredlogscommon.MetricAttr{ {Unit: Percent, Name: MetricName(TypeInstanceFS, FSUtilization)}}, - DimensionSets: [][]string{{ContainerInstanceIdKey, InstanceId, ClusterNameKey}, {ClusterNameKey}}, + DimensionSets: [][]string{{ContainerInstanceIdKey, InstanceIdKey, ClusterNameKey}, {ClusterNameKey}}, Namespace: cloudwatchNamespace, }, } diff --git a/plugins/processors/ecsdecorator/metricRule_test.go b/plugins/processors/ecsdecorator/metricRule_test.go index 650a1b429e..2cd3bf6942 100644 --- a/plugins/processors/ecsdecorator/metricRule_test.go +++ b/plugins/processors/ecsdecorator/metricRule_test.go @@ -17,7 +17,7 @@ import ( ) func TestNodeFull(t *testing.T) { - tags := map[string]string{MetricType: TypeInstance, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstance, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, CpuUtilization): 0, MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -31,7 +31,7 @@ func TestNodeFull(t *testing.T) { } func TestNodeLackOfCpuUtilization(t *testing.T) { - tags := map[string]string{MetricType: 
TypeInstance, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstance, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstance, MemUtilization): 0, MetricName(TypeInstance, NetTotalBytes): 0, MetricName(TypeInstance, CpuReservedCapacity): 0, MetricName(TypeInstance, MemReservedCapacity): 0, MetricName(TypeInstance, RunningTaskCount): 0, MetricName(TypeInstance, CpuTotal): 0, @@ -64,7 +64,7 @@ func TestNodeLackOfInstanceId(t *testing.T) { } func TestNodeFSFull(t *testing.T) { - tags := map[string]string{MetricType: TypeInstanceFS, InstanceId: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} + tags := map[string]string{MetricType: TypeInstanceFS, InstanceIdKey: "TestEC2InstanceId", ContainerInstanceIdKey: "TestContainerInstanceId", ClusterNameKey: "TestClusterName"} fields := map[string]interface{}{MetricName(TypeInstanceFS, FSUtilization): 0} m := metric.New("test", tags, fields, time.Now()) new(ECSDecorator).tagMetricRule(m) diff --git a/plugins/processors/gpuattributes/config.go b/plugins/processors/gpuattributes/config.go new file mode 100644 index 0000000000..6dfd340d45 --- /dev/null +++ b/plugins/processors/gpuattributes/config.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct{} + +// Verify Config implements Processor interface. +var _ component.Config = (*Config)(nil) + +// Validate does not check for unsupported dimension key-value pairs, because those +// get silently dropped and ignored during translation. 
+func (cfg *Config) Validate() error { + return nil +} diff --git a/plugins/processors/gpuattributes/config_test.go b/plugins/processors/gpuattributes/config_test.go new file mode 100644 index 0000000000..50f76e3ac8 --- /dev/null +++ b/plugins/processors/gpuattributes/config_test.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/plugins/processors/gpuattributes/factory.go b/plugins/processors/gpuattributes/factory.go new file mode 100644 index 0000000000..fcbc4f3950 --- /dev/null +++ b/plugins/processors/gpuattributes/factory.go @@ -0,0 +1,56 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + stability = component.StabilityLevelBeta +) + +var ( + TypeStr, _ = component.NewType("gpuattributes") + processorCapabilities = consumer.Capabilities{MutatesData: true} +) + +func NewFactory() processor.Factory { + return processor.NewFactory( + TypeStr, + createDefaultConfig, + processor.WithMetrics(createMetricsProcessor, stability)) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createMetricsProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextConsumer consumer.Metrics, +) (processor.Metrics, error) { + processorConfig, ok := cfg.(*Config) + if !ok { + return nil, fmt.Errorf("configuration parsing error") + } + + metricsProcessor := newGpuAttributesProcessor(processorConfig, set.Logger) + + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + nextConsumer, + metricsProcessor.processMetrics, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/plugins/processors/gpuattributes/factory_test.go b/plugins/processors/gpuattributes/factory_test.go new file mode 100644 index 0000000000..7fd46aca74 --- /dev/null +++ b/plugins/processors/gpuattributes/factory_test.go @@ -0,0 +1,45 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor/processortest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + setting := processortest.NewNopCreateSettings() + + tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + assert.Nil(t, tProcessor) + + mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.NoError(t, err) + assert.NotNil(t, mProcessor) + + lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + assert.Nil(t, lProcessor) +} diff --git a/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator.go b/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator.go new file mode 100644 index 0000000000..eb321ddf61 --- /dev/null +++ b/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator.go @@ -0,0 +1,92 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package internal + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/aws/amazon-cloudwatch-agent/internal/containerinsightscommon" +) + +var memoryMetricsNames = map[string]struct{}{ + containerinsightscommon.NeuronCoreMemoryUtilizationConstants: {}, + containerinsightscommon.NeuronCoreMemoryUtilizationModelCode: {}, + containerinsightscommon.NeuronCoreMemoryUtilizationSharedScratchpad: {}, + containerinsightscommon.NeuronCoreMemoryUtilizationRuntimeMemory: {}, + containerinsightscommon.NeuronCoreMemoryUtilizationTensors: {}, +} + +type NeuronCoreInfo struct { + neuronCoreIndex string + neuronDeviceIndex string + runtimeTag string +} + +type AwsNeuronMemoryMetricsAggregator struct { + memoryMetricValuesAggregator map[NeuronCoreInfo]float64 + aggregatedMemoryMetricAttributes pcommon.Map + metricTimestamp pcommon.Timestamp + MemoryMetricsFound bool +} + +func NewMemoryMemoryAggregator() *AwsNeuronMemoryMetricsAggregator { + return &AwsNeuronMemoryMetricsAggregator{memoryMetricValuesAggregator: map[NeuronCoreInfo]float64{}, MemoryMetricsFound: false} +} + +func (d *AwsNeuronMemoryMetricsAggregator) AggregateMemoryMetric(originalMetric pmetric.Metric) { + if _, exists := memoryMetricsNames[originalMetric.Name()]; !exists { + return + } + + datapoints := originalMetric.Gauge().DataPoints() + + if datapoints.Len() <= 0 { + return + } + + d.MemoryMetricsFound = true + d.aggregatedMemoryMetricAttributes = datapoints.At(0).Attributes() + d.metricTimestamp = datapoints.At(0).Timestamp() + + for i := 0; i < datapoints.Len(); i++ { + datapoint := datapoints.At(i) + + neuronCoreIndexValue, neuronCoreIndexValueExists := datapoint.Attributes().Get(NeuronCoreAttributeKey) + neuronDeviceIndexValue, neuronDeviceIndexValueExists := datapoint.Attributes().Get(NeuronDeviceAttributeKey) + runtimeTagValue, runtimeTagExists := datapoint.Attributes().Get(RuntimeTag) + + if 
neuronCoreIndexValueExists && neuronDeviceIndexValueExists && runtimeTagExists { + neuronCoreInfo := NeuronCoreInfo{neuronCoreIndex: neuronCoreIndexValue.AsString(), neuronDeviceIndex: neuronDeviceIndexValue.AsString(), runtimeTag: runtimeTagValue.AsString()} + d.memoryMetricValuesAggregator[neuronCoreInfo] += datapoint.DoubleValue() + } + } + +} + +func (d *AwsNeuronMemoryMetricsAggregator) FlushAggregatedMemoryMetric() pmetric.Metric { + aggregatedMemoryMetric := pmetric.NewMetric() + aggregatedMemoryMetric.SetName(containerinsightscommon.NeuronCoreMemoryUtilizationTotal) + datapoints := aggregatedMemoryMetric.SetEmptySum().DataPoints() + + for neuronCoreInfo, totalMemoryUsed := range d.memoryMetricValuesAggregator { + datapoint := datapoints.AppendEmpty() + datapoint.SetDoubleValue(totalMemoryUsed) + d.aggregatedMemoryMetricAttributes.CopyTo(datapoint.Attributes()) + + datapoint.Attributes().PutStr(NeuronCoreAttributeKey, neuronCoreInfo.neuronCoreIndex) + datapoint.Attributes().PutStr(NeuronDeviceAttributeKey, neuronCoreInfo.neuronDeviceIndex) + datapoint.Attributes().PutStr(RuntimeTag, neuronCoreInfo.runtimeTag) + datapoint.SetTimestamp(d.metricTimestamp) + } + + // Reset the aggregator + d.resetMemoryMetricAggregator() + return aggregatedMemoryMetric +} + +func (d *AwsNeuronMemoryMetricsAggregator) resetMemoryMetricAggregator() { + d.memoryMetricValuesAggregator = map[NeuronCoreInfo]float64{} + d.MemoryMetricsFound = false +} diff --git a/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator_test.go b/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator_test.go new file mode 100644 index 0000000000..a2612d6708 --- /dev/null +++ b/plugins/processors/gpuattributes/internal/awsneuron_memory_metric_aggregator_test.go @@ -0,0 +1,128 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package internal + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/aws/amazon-cloudwatch-agent/internal/containerinsightscommon" +) + +const ( + dummy = "dummy" +) + +var ( + memoryUsageMetricValuesMap = map[string]float64{ + "0": 20, + "2": 40, + } +) + +func TestMemoryMetricAggregator_AggregateMemoryMetric(t *testing.T) { + aggregator := NewMemoryMemoryAggregator() + + // Create a sample original metric with gauge data points + tensorsMemoryUsage := createSampleMetric(containerinsightscommon.NeuronCoreMemoryUtilizationTensors) + nonNeuronMetric := createSampleMetric(dummy) + + // Call the method being tested + aggregator.AggregateMemoryMetric(tensorsMemoryUsage) + aggregator.AggregateMemoryMetric(nonNeuronMetric) + + // Assert that memory metrics were found + assert.True(t, aggregator.MemoryMetricsFound) +} + +func TestMemoryMetricAggregator_NonNeuronMetric(t *testing.T) { + aggregator := NewMemoryMemoryAggregator() + + // Create a sample original metric with gauge data points + nonNeuronMetric := createSampleMetric("dummy") + + // Call the method being tested + aggregator.AggregateMemoryMetric(nonNeuronMetric) + + // Assert that memory metrics were found + assert.False(t, aggregator.MemoryMetricsFound) +} + +func TestMemoryMetricAggregator_FlushAggregatedMemoryMetric(t *testing.T) { + aggregator := NewMemoryMemoryAggregator() + aggregator.aggregatedMemoryMetricAttributes = pcommon.NewMap() + aggregator.aggregatedMemoryMetricAttributes.FromRaw(map[string]any{ + NeuronCoreAttributeKey: "9", + NeuronDeviceAttributeKey: "9", + dummy: dummy, + }) + + aggregator.metricTimestamp = staticTimestamp + + // Add some data to the aggregator + // Create a sample original metric with gauge data points + tensorsMemoryUsage := createSampleMetric(containerinsightscommon.NeuronCoreMemoryUtilizationTensors) + 
constantsMemoryUsage := createSampleMetric(containerinsightscommon.NeuronCoreMemoryUtilizationConstants) + nonNeuronMetric := createSampleMetric(dummy) + + // Call the method being tested + aggregator.AggregateMemoryMetric(tensorsMemoryUsage) + aggregator.AggregateMemoryMetric(constantsMemoryUsage) + aggregator.AggregateMemoryMetric(nonNeuronMetric) + + // Call the method being tested + aggregatedMetric := aggregator.FlushAggregatedMemoryMetric() + aggregatedMetricDatapoints := aggregatedMetric.Sum().DataPoints() + // Assert the result + assert.NotNil(t, aggregatedMetric) + assert.Equal(t, containerinsightscommon.NeuronCoreMemoryUtilizationTotal, aggregatedMetric.Name()) + assert.Equal(t, 2, aggregatedMetricDatapoints.Len()) + + for i := 0; i < aggregatedMetricDatapoints.Len(); i++ { + datapoint := aggregatedMetricDatapoints.At(i) + assert.Equal(t, staticTimestamp.String(), datapoint.Timestamp().String()) + assert.Equal(t, 4, datapoint.Attributes().Len()) + + actualNeuronCoreIndex, _ := datapoint.Attributes().Get(NeuronCoreAttributeKey) + actualNeuronDeviceIndex, _ := datapoint.Attributes().Get(NeuronDeviceAttributeKey) + actualRuntimeTag, _ := datapoint.Attributes().Get(RuntimeTag) + + assert.Equal(t, memoryUsageMetricValuesMap[actualNeuronCoreIndex.AsString()], datapoint.DoubleValue()) + assert.Equal(t, "1", actualRuntimeTag.AsString()) + assert.NotEqual(t, "9", actualNeuronCoreIndex.AsString()) + assert.NotEqual(t, "9", actualNeuronDeviceIndex.AsString()) + } +} + +func createSampleMetric(metricName string) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(metricName) + + // Add gauge data points + dataPoints := metric.SetEmptyGauge().DataPoints() + dataPoint1 := dataPoints.AppendEmpty() + dataPoint1.SetDoubleValue(10.0) + dataPoint1.SetTimestamp(staticTimestamp) + dataPoint1.Attributes().FromRaw(map[string]any{ + NeuronCoreAttributeKey: "0", + NeuronDeviceAttributeKey: "0", + dummy: dummy, + RuntimeTag: "1", + }) + + dataPoint2 := 
dataPoints.AppendEmpty() + dataPoint2.SetDoubleValue(20.0) + dataPoint1.SetTimestamp(staticTimestamp) + dataPoint2.Attributes().FromRaw(map[string]any{ + NeuronCoreAttributeKey: "2", + NeuronDeviceAttributeKey: "1", + dummy: dummy, + RuntimeTag: "1", + }) + + return metric +} diff --git a/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier.go b/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier.go new file mode 100644 index 0000000000..a2a463e83a --- /dev/null +++ b/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier.go @@ -0,0 +1,367 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package internal + +import ( + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/internal/containerinsightscommon" +) + +const ( + ErrorType = "error_type" + StatusType = "status_type" + EventType = "event_type" + logTypeSuffix = "AWSNeuron" + MemoryLocation = "memory_location" + + Core = "Core" + Device = "Device" + Percentile = "percentile" + PodName = "PodName" + Count = "Count" + Bytes = "Bytes" + Seconds = "Seconds" + Percent = "Percent" + NeuronCoreAttributeKey = "NeuronCore" + NeuronDeviceAttributeKey = "NeuronDevice" + RuntimeTag = "runtime_tag" + ClusterName = "ClusterName" + ContainerName = "ContainerName" + FullPodName = "FullPodName" + InstanceId = "InstanceId" + InstanceType = "InstanceType" + K8sPodName = "K8sPodName" + Namespace = "Namespace" + NeuronCore = "NeuronCore" + NeuronDevice = "NeuronDevice" + NodeName = "NodeName" + Service = "Service" + AvailabilityZone = "availability_zone" + Kubernetes = "kubernetes" + Region = "region" + SubnetId = "subnet_id" + NeuronExecutionErrorsAggregatedMetric = containerinsightscommon.NeuronExecutionErrors + "_total" + NeuronDeviceHardwareEccEventsAggregatedMetric = 
containerinsightscommon.NeuronDeviceHardwareEccEvents + "_total" +) + +type AwsNeuronMetricModifier struct { + logger *zap.Logger +} + +type MetricModifications struct { + DuplicationTypes []string + UniqueAttribute string + LogTypeSuffix string + Unit string +} + +type MetricDatapointAggregationKey struct { + runtimeTag string + aggregatedMetricName string + deviceId string +} + +var ( + metricModificationsMap = map[string]MetricModifications{ + containerinsightscommon.NeuronExecutionErrors: {DuplicationTypes: []string{containerinsightscommon.TypeNode}, UniqueAttribute: ErrorType, LogTypeSuffix: "", Unit: Count}, + containerinsightscommon.NeuronExecutionStatus: {DuplicationTypes: []string{containerinsightscommon.TypeNode}, UniqueAttribute: StatusType, LogTypeSuffix: "", Unit: Count}, + containerinsightscommon.NeuronRuntimeMemoryUsage: {DuplicationTypes: []string{containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: "", Unit: Bytes}, + containerinsightscommon.NeuronCoreMemoryUtilizationTotal: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + containerinsightscommon.NeuronCoreMemoryUtilizationConstants: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + containerinsightscommon.NeuronCoreMemoryUtilizationModelCode: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + containerinsightscommon.NeuronCoreMemoryUtilizationSharedScratchpad: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + 
containerinsightscommon.NeuronCoreMemoryUtilizationRuntimeMemory: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + containerinsightscommon.NeuronCoreMemoryUtilizationTensors: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Bytes}, + containerinsightscommon.NeuronCoreUtilization: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: Core, Unit: Percent}, + containerinsightscommon.NeuronInstanceInfo: {DuplicationTypes: []string{}, UniqueAttribute: "", LogTypeSuffix: "", Unit: Count}, + containerinsightscommon.NeuronHardware: {DuplicationTypes: []string{}, UniqueAttribute: "", LogTypeSuffix: "", Unit: Count}, + containerinsightscommon.NeuronExecutionLatency: {DuplicationTypes: []string{containerinsightscommon.TypeNode}, UniqueAttribute: "", LogTypeSuffix: "", Unit: Seconds}, + containerinsightscommon.NeuronDeviceHardwareEccEvents: {DuplicationTypes: []string{containerinsightscommon.TypeContainer, containerinsightscommon.TypePod, containerinsightscommon.TypeNode}, UniqueAttribute: EventType, LogTypeSuffix: Device, Unit: Count}, + } + attributeValuePrefixingMap = map[string]string{NeuronCoreAttributeKey: "core", NeuronDeviceAttributeKey: "device"} + + uniquesDatapointsToAggregatedMetricMappings = map[string]map[string]string{ + containerinsightscommon.NeuronExecutionErrors: {"generic": NeuronExecutionErrorsAggregatedMetric, + "numerical": NeuronExecutionErrorsAggregatedMetric, + "transient": NeuronExecutionErrorsAggregatedMetric, + "model": NeuronExecutionErrorsAggregatedMetric, + "runtime": NeuronExecutionErrorsAggregatedMetric, + "hardware": 
NeuronExecutionErrorsAggregatedMetric}, + // execution_status metric will be added here incrementally + containerinsightscommon.NeuronDeviceHardwareEccEvents: {"mem_ecc_corrected": NeuronDeviceHardwareEccEventsAggregatedMetric, + "mem_ecc_uncorrected": NeuronDeviceHardwareEccEventsAggregatedMetric, + "sram_ecc_corrected": NeuronDeviceHardwareEccEventsAggregatedMetric, + "sram_ecc_uncorrected": NeuronDeviceHardwareEccEventsAggregatedMetric}, + } + + MetricAttributesToKeep = map[string]struct{}{ + ClusterName: {}, + ContainerName: {}, + FullPodName: {}, + InstanceId: {}, + InstanceType: {}, + K8sPodName: {}, + Namespace: {}, + NeuronDevice: {}, + NodeName: {}, + PodName: {}, + Service: {}, + AvailabilityZone: {}, + Kubernetes: {}, + Region: {}, + RuntimeTag: {}, + SubnetId: {}, + NeuronCore: {}, + } +) + +func NewMetricModifier(logger *zap.Logger) *AwsNeuronMetricModifier { + d := &AwsNeuronMetricModifier{ + logger: logger, + } + return d +} + +func (md *AwsNeuronMetricModifier) ModifyMetric(originalMetric pmetric.Metric, metrics pmetric.MetricSlice) { + // only decorate Aws Neuron metrics + // another option is to separate Aws Neuron in its own pipeline to minimize extra processing of metrics + if _, isNeuronMetric := metricModificationsMap[originalMetric.Name()]; !isNeuronMetric { + return + } + + // Since the otel to grouped metrics conversions takes type into account, + // thus we need to convert all metrics to the same type so that they are grouped together. + if originalMetric.Type() == pmetric.MetricTypeGauge { + convertGaugeToSum(originalMetric) + } + // Neuron metrics sent by the neuron monitor don't have any units so we add them in the agent. + addUnit(originalMetric) + prefixCoreAndDeviceLabels(originalMetric) + resetStaleDatapoints(originalMetric) + + originalMetricName := originalMetric.Name() + // The neuron metrics sent by the neuron monitor are not homogeneous + // and some metrics require special processing. 
+ // We perform those special processing before duplicating metric for pod, node and container. + if originalMetricName == containerinsightscommon.NeuronExecutionLatency { + keepSpecificDatapointBasedOnAttribute(originalMetric, Percentile, "p50") + } else if originalMetricName == containerinsightscommon.NeuronRuntimeMemoryUsage { + keepSpecificDatapointBasedOnAttribute(originalMetric, MemoryLocation, "neuron_device") + } + + modifiedMetricSlice := md.extractDatapointsAsMetricsAndAggregate(originalMetric) + filterLabels(modifiedMetricSlice, originalMetricName) + md.duplicateMetrics(modifiedMetricSlice, originalMetricName, originalMetric.Sum().DataPoints(), metrics) +} + +// This method converts gauges to sum so that all metrics can be grouped in the same grouped metrics. +// The default value of temporality is undefined so even after conversion from gauge to sum the agent won't take delta. +func convertGaugeToSum(originalMetric pmetric.Metric) { + datapoints := originalMetric.Gauge().DataPoints() + originalMetric.SetEmptySum() + datapoints.MoveAndAppendTo(originalMetric.Sum().DataPoints()) +} + +func addUnit(originalMetric pmetric.Metric) { + originalMetric.SetUnit(metricModificationsMap[originalMetric.Name()].Unit) +} + +// This method keeps a specific datapoint in the list of datapoints, +// filtering out the rest based on value of the target attribute. 
+// - For neuron_execution_latency metric we keep p50 percentile +// - For neurondevice_runtime_memory we keep the neuron_device memory datapoint +// example : +// +// in : neurondevice_runtime_memory {datapoints: [ 0 : {Attributes : {..., percentile:p50, ....}, value 3}, 1: {Attributes : {..., percentile:p99, ....}, , value 4}]} +// out : neurondevice_runtime_memory {datapoints: [ 0 : {Attributes : {..., percentile:p50, ....}, value 3}]} +func keepSpecificDatapointBasedOnAttribute(originalMetric pmetric.Metric, attributeKey string, attributeValueToKeep string) { + originalMetric.Sum().DataPoints().RemoveIf(func(dp pmetric.NumberDataPoint) bool { + value, exists := dp.Attributes().Get(attributeKey) + return !exists || value.Str() != attributeValueToKeep + }) +} + +// This method takes a metric and creates an aggregated metric from its datapoint values. +// It also creates a new metric for each datapoint based on the unique target attribute. +// example : +// in: unique_target_attribute = error_type +// and error_type: A,B,C need to be aggregated in neuron_execution_errors_total metric then +// +// neuron_execution_errors { +// datapoints : [ +// 0 : { Attribute : {..., error_type : A, ....}, value = 1 }, +// 1 : { Attribute : {..., error_type : B, ....}, value = 2 }, +// 2 : { Attribute : {..., error_type : C, ....}, value = 3 } +// ] +// } +// +// out: unique_target_attribute = error_type +// [ +// +// neuron_execution_errors_total { +// datapoints : [ 0 : { Attribute : {..., error_type : A, ....}, value = 6 }] +// }, +// neuron_execution_errors_A { +// datapoints : [ 0 : { Attribute : {..., error_type : A, ....}, value = 1 }] +// }, +// neuron_execution_errors_B { +// datapoints : [ 0 : { Attribute : {..., error_type : B, ....}, value = 2 }] +// }, +// neuron_execution_errors_C { +// datapoints : [ 0 : { Attribute : {..., error_type : C, ....}, value = 3 }] +// }, +// +// ] +func (md *AwsNeuronMetricModifier) extractDatapointsAsMetricsAndAggregate(originalMetric 
pmetric.Metric) pmetric.MetricSlice { + newMetricSlice := pmetric.NewMetricSlice() + uniqueAttribute := metricModificationsMap[originalMetric.Name()].UniqueAttribute + if uniqueAttribute == "" { + originalMetric.CopyTo(newMetricSlice.AppendEmpty()) + return newMetricSlice + } + + originalMetricDatapoints := originalMetric.Sum().DataPoints() + + aggregatedValuesPerRuntimeTag := map[MetricDatapointAggregationKey]float64{} + uniqueAttributeToAggregatedMetricMappings, needsAggregation := uniquesDatapointsToAggregatedMetricMappings[originalMetric.Name()] + for i := 0; i < originalMetricDatapoints.Len(); i++ { + originalDatapoint := originalMetricDatapoints.At(i) + runtimeTag, _ := originalDatapoint.Attributes().Get(RuntimeTag) + deviceId, _ := originalDatapoint.Attributes().Get(NeuronDeviceAttributeKey) + uniqueAttributeValue, _ := originalDatapoint.Attributes().Get(uniqueAttribute) + + // only add to the aggregation map if the datapoint to aggregated metric mappings are defined for the original metric + if needsAggregation { + aggregatedMetricName := uniqueAttributeToAggregatedMetricMappings[uniqueAttributeValue.Str()] + aggregatedValuesPerRuntimeTag[MetricDatapointAggregationKey{runtimeTag: runtimeTag.Str(), aggregatedMetricName: aggregatedMetricName, deviceId: deviceId.Str()}] += originalDatapoint.DoubleValue() + } + + // Creating a new metric from the current datapoint and adding it to the new newMetricSlice + newNameMetric := setMetricMetadata(newMetricSlice.AppendEmpty(), originalMetric.Name()+"_"+uniqueAttributeValue.Str(), originalMetric.Unit()) + originalDatapoint.CopyTo(newNameMetric.SetEmptySum().DataPoints().AppendEmpty()) + // setting value of temporality to cumulative so that agent performs delta conversion on this metric + newNameMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } + + // Creating body for the aggregated metric and add it to the new newMetricSlice for each runtime + for aggregatedMetricMetadata, value := 
range aggregatedValuesPerRuntimeTag { + // Aggregated metric for neuron device ecc events is not required + aggregatedMetric := setMetricMetadata(newMetricSlice.AppendEmpty(), aggregatedMetricMetadata.aggregatedMetricName, originalMetric.Unit()) + + originalMetricDatapoints.At(0).CopyTo(aggregatedMetric.SetEmptySum().DataPoints().AppendEmpty()) + aggregatedMetric.Sum().DataPoints().At(0).SetDoubleValue(value) + aggregatedMetric.Sum().DataPoints().At(0).Attributes().PutStr(RuntimeTag, aggregatedMetricMetadata.runtimeTag) + + if aggregatedMetricMetadata.deviceId != "" { + aggregatedMetric.Sum().DataPoints().At(0).Attributes().PutStr(NeuronDeviceAttributeKey, aggregatedMetricMetadata.deviceId) + } + + // setting value of temporality to cumulative so that agent performs delta conversion on this metric + aggregatedMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } + + return newMetricSlice +} + +// This method removes the attribute keys which are not required. 
The removal is necessary so that the metrics are grouped together +func filterLabels(slice pmetric.MetricSlice, originalMetricName string) { + _, exists := metricModificationsMap[originalMetricName] + if !exists { + return + } + + for i := 0; i < slice.Len(); i++ { + m := slice.At(i) + + dps := m.Sum().DataPoints() + for j := 0; j < dps.Len(); j++ { + attributes := dps.At(j).Attributes() + attributes.RemoveIf(func(label string, value pcommon.Value) bool { + _, exists := MetricAttributesToKeep[label] + if !exists { + return true + } + return false + }) + } + } +} + +// This method prefixes NeuronCore and NeuronDevice values with `core` and `device` respectively +// to make the attribute values more verbose +func prefixCoreAndDeviceLabels(originalMetric pmetric.Metric) { + dps := originalMetric.Sum().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + for attributeKey, attributeValuePrefix := range attributeValuePrefixingMap { + if value, exists := dp.Attributes().Get(attributeKey); exists { + dp.Attributes().PutStr(attributeKey, attributeValuePrefix+value.Str()) + } + } + } +} + +// This method performs selective duplication of a metric based on the types for which duplication needs to be performed. 
+// A metric is duplicated for pod and container only if pod correlation has been done successfully +func (md *AwsNeuronMetricModifier) duplicateMetrics(metricsSlice pmetric.MetricSlice, originalMetricName string, originalMetricDatapoints pmetric.NumberDataPointSlice, metrics pmetric.MetricSlice) { + metricModifications := metricModificationsMap[originalMetricName] + + // check if pod correlation has been performed, if not then don't emit metric for container and pod + duplicateForNodeOnly := false + podName, exists := originalMetricDatapoints.At(0).Attributes().Get(PodName) + if !exists || len(podName.Str()) == 0 { + duplicateForNodeOnly = true + } + + for i := 0; i < metricsSlice.Len(); i++ { + metric := metricsSlice.At(i) + if duplicateForNodeOnly { + duplicateMetricForType(metric, containerinsightscommon.TypeNode, originalMetricName, metrics) + } else { + for _, prefix := range metricModifications.DuplicationTypes { + duplicateMetricForType(metric, prefix, originalMetricName, metrics) + } + } + } +} + +// This method creates new metrics by prefixing the metric name with each k8 concepts (pod, node and container). +// It also adds logTypes to all the metric datapoint attributes. +func duplicateMetricForType(metric pmetric.Metric, duplicateType string, originalMetricName string, metrics pmetric.MetricSlice) { + metricCopy := metrics.AppendEmpty() + metric.CopyTo(metricCopy) + metricCopy.SetName(strings.ToLower(duplicateType) + "_" + metricCopy.Name()) + + datapoints := metricCopy.Sum().DataPoints() + for i := 0; i < datapoints.Len(); i++ { + datapoints.At(i).Attributes().PutStr(containerinsightscommon.MetricType, duplicateType+logTypeSuffix+metricModificationsMap[originalMetricName].LogTypeSuffix) + } +} + +func setMetricMetadata(metric pmetric.Metric, name string, unit string) pmetric.Metric { + metric.SetName(name) + metric.SetUnit(unit) + return metric +} + +// This method updates the stale or nan datapoints so that they report the default value of 0 instead. 
This is needed so that we can see the default values instead of a gap. +// - return the assigned value converted to a double if possible, else 0 +// - set the runtime tag to default since the runtime associated no longer exists +// - reset the NoRecordedValue flag so that the metric is not dropped +func resetStaleDatapoints(originalMetric pmetric.Metric) { + dps := originalMetric.Sum().DataPoints() + for i := 0; i < dps.Len(); i++ { + dp := dps.At(i) + if dp.ValueType() == pmetric.NumberDataPointValueTypeEmpty || dp.Flags().NoRecordedValue() { + dp.SetDoubleValue(dp.DoubleValue()) + dp.Attributes().PutStr(RuntimeTag, "default") + dp.SetFlags(dp.Flags().WithNoRecordedValue(false)) + } + } +} diff --git a/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier_test.go b/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier_test.go new file mode 100644 index 0000000000..b0140b831c --- /dev/null +++ b/plugins/processors/gpuattributes/internal/awsneuron_metric_modifier_test.go @@ -0,0 +1,423 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package internal + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + "golang.org/x/exp/maps" +) + +var staticAttributes = map[string]any{ + ClusterName: "dummyAttribute", + InstanceId: "dummyAttribute", + InstanceType: "dummyAttribute", + NodeName: "dummyAttribute", + AvailabilityZone: "dummyAttribute", + Kubernetes: "dummyAttribute", + RuntimeTag: "dummyAttribute", + SubnetId: "dummyAttribute", +} +var staticTimestamp = pcommon.NewTimestampFromTime(time.Date(2023, time.March, 12, 11, 0, 0, 0, time.UTC)) + +const ( + NonNeuronMetric = "non_neuron_metric" + NeuronExecutionErrors = "neuron_execution_errors" + NeuronExecutionStatus = "neuron_execution_status" + NeuronCoreMemoryUsageModelSharedScratchpad = "neuroncore_memory_usage_model_shared_scratchpad" + NeuronDeviceRuntimeMemoryUsedBytes = "neurondevice_runtime_memory_used_bytes" + NeuronExecutionLatency = "neuron_execution_latency" + NeuronDeviceHwEccEvents = "neurondevice_hw_ecc_events" + NeuronDeviceIndex = "neuron_device_index" + DummyPod = "DummyPod" + Type = "Type" + NodeAWSNeuronDevice = "NodeAWSNeuronDevice" + PodAWSNeuronDevice = "PodAWSNeuronDevice" + ContainerAWSNeuronDevice = "ContainerAWSNeuronDevice" + NodeAWSNeuronCore = "NodeAWSNeuronCore" + PodAWSNeuronCore = "PodAWSNeuronCore" + ContainerAWSNeuronCore = "ContainerAWSNeuronCore" + NodeAWSNeuron = "NodeAWSNeuron" +) + +type MetricDefinition struct { + MetricType pmetric.MetricType + MetricValues []float64 + SpecialAttributes [][]string + Unit string +} + +var metricNameToMetricLayout = map[string]MetricDefinition{ + NonNeuronMetric: {MetricType: pmetric.MetricTypeGauge, MetricValues: []float64{1}, SpecialAttributes: [][]string{}, Unit: Count}, + NeuronExecutionErrors: {MetricType: pmetric.MetricTypeSum, MetricValues: []float64{1, 2, 3, 4, 5, 6}, SpecialAttributes: 
[][]string{{ErrorType, "generic", RuntimeTag, "1"}, {ErrorType, "numerical", RuntimeTag, "1"}, {ErrorType, "transient", RuntimeTag, "1"}, {ErrorType, "model", RuntimeTag, "1"}, {ErrorType, "runtime", RuntimeTag, "1"}, {ErrorType, "hardware", RuntimeTag, "1"}}, Unit: Count}, + NeuronExecutionStatus: {MetricType: pmetric.MetricTypeSum, MetricValues: []float64{1, 2, 3, 4, 5, 6}, SpecialAttributes: [][]string{{StatusType, "completed", RuntimeTag, "1"}, {StatusType, "completed_with_err", RuntimeTag, "1"}, {StatusType, "completed_with_num_err", RuntimeTag, "1"}, {StatusType, "timed_out", RuntimeTag, "1"}, {StatusType, "incorrect_input", RuntimeTag, "1"}, {StatusType, "failed_to_queue", RuntimeTag, "1"}}, Unit: Count}, + NeuronCoreMemoryUsageModelSharedScratchpad: {MetricType: pmetric.MetricTypeGauge, MetricValues: []float64{1, 2, 3}, SpecialAttributes: [][]string{{NeuronCore, "0", NeuronDevice, "0", MemoryLocation, "None", PodName, DummyPod}, {NeuronCore, "1", NeuronDevice, "0", MemoryLocation, "None", PodName, DummyPod}, {NeuronCore, "2", NeuronDevice, "1", MemoryLocation, "None", PodName, DummyPod}}, Unit: Bytes}, + NeuronDeviceRuntimeMemoryUsedBytes: {MetricType: pmetric.MetricTypeGauge, MetricValues: []float64{1, 2}, SpecialAttributes: [][]string{{MemoryLocation, "host"}, {MemoryLocation, "neuron_device"}}, Unit: Bytes}, + NeuronExecutionLatency: {MetricType: pmetric.MetricTypeGauge, MetricValues: []float64{0, 0, 0, 0, 1, 0, 0}, SpecialAttributes: [][]string{{Percentile, "p0"}, {Percentile, "p1"}, {Percentile, "p100"}, {Percentile, "p25"}, {Percentile, "p50"}, {Percentile, "p75"}, {Percentile, "p99"}}, Unit: Seconds}, + NeuronDeviceHwEccEvents: {MetricType: pmetric.MetricTypeSum, MetricValues: []float64{1, 2, 3, 4}, SpecialAttributes: [][]string{{NeuronDeviceIndex, "1", NeuronDevice, "1", EventType, "mem_ecc_corrected", PodName, DummyPod, RuntimeTag, "1"}, {NeuronDeviceIndex, "1", NeuronDevice, "1", EventType, "mem_ecc_uncorrected", PodName, DummyPod, RuntimeTag, 
"1"}, {NeuronDeviceIndex, "1", NeuronDevice, "1", EventType, "sram_ecc_corrected", PodName, DummyPod, RuntimeTag, "1"}, {NeuronDeviceIndex, "1", NeuronDevice, "1", EventType, "sram_ecc_uncorrected", PodName, DummyPod, RuntimeTag, "1"}}, Unit: Count}, +} + +func setupMetricModifier() *AwsNeuronMetricModifier { + logger, _ := zap.NewDevelopment() + return &AwsNeuronMetricModifier{logger: logger} +} +func TestMetricModifierForExecutionLatencyMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronExecutionLatency).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronExecutionLatency: metricsList.At(0), + "node_neuron_execution_latency": createExpectedMetric("node_neuron_execution_latency", false, []map[string]string{{Type: NodeAWSNeuron}}, []float64{1}, pmetric.MetricTypeSum, Seconds), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} +func TestMetricModifierForExecutionErrorMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronExecutionErrors).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronExecutionErrors: metricsList.At(0), + "node_neuron_execution_errors_generic": createExpectedMetric("node_neuron_execution_errors_generic", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_numerical": createExpectedMetric("node_neuron_execution_errors_numerical", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_transient": createExpectedMetric("node_neuron_execution_errors_transient", true, 
[]map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_model": createExpectedMetric("node_neuron_execution_errors_model", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_runtime": createExpectedMetric("node_neuron_execution_errors_runtime", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{5}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_hardware": createExpectedMetric("node_neuron_execution_errors_hardware", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{6}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_total": createExpectedMetric("node_neuron_execution_errors_total", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{21}, pmetric.MetricTypeSum, Count), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForExecutionStatusMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronExecutionStatus).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMap := maps.Clone(staticAttributes) + expectedMap[Type] = NodeAWSNeuron + + expectedMetrics := map[string]pmetric.Metric{ + NeuronExecutionStatus: metricsList.At(0), + "node_neuron_execution_status_completed": createExpectedMetric("node_neuron_execution_status_completed", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_completed_with_err": createExpectedMetric("node_neuron_execution_status_completed_with_err", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + 
"node_neuron_execution_status_completed_with_num_err": createExpectedMetric("node_neuron_execution_status_completed_with_num_err", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_timed_out": createExpectedMetric("node_neuron_execution_status_timed_out", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_incorrect_input": createExpectedMetric("node_neuron_execution_status_incorrect_input", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{5}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_failed_to_queue": createExpectedMetric("node_neuron_execution_status_failed_to_queue", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{6}, pmetric.MetricTypeSum, Count), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForNeuronCoreMemoryUsageMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronCoreMemoryUsageModelSharedScratchpad).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronCoreMemoryUsageModelSharedScratchpad: metricsList.At(0), + "node_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("node_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: NodeAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: NodeAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: NodeAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + "pod_neuroncore_memory_usage_model_shared_scratchpad": 
createExpectedMetric("pod_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: PodAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: PodAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: PodAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + "container_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("container_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: ContainerAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: ContainerAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: ContainerAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForNeuronCoreMemoryUsageMetric_PodNameMissing(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + removeAttributefromMetric(createActualMetricForKey(NeuronCoreMemoryUsageModelSharedScratchpad), PodName).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronCoreMemoryUsageModelSharedScratchpad: metricsList.At(0), + "node_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("node_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: NodeAWSNeuronCore}, {NeuronCore: "core1", NeuronDevice: "device0", Type: NodeAWSNeuronCore}, {NeuronCore: "core2", NeuronDevice: "device1", Type: NodeAWSNeuronCore}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + 
+func TestMetricModifierForNeuronDeviceRuntimeMemoryUsageMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronDeviceRuntimeMemoryUsedBytes).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronDeviceRuntimeMemoryUsedBytes: metricsList.At(0), + "node_neurondevice_runtime_memory_used_bytes": createExpectedMetric("node_neurondevice_runtime_memory_used_bytes", false, []map[string]string{{Type: NodeAWSNeuron}}, []float64{2}, pmetric.MetricTypeSum, Bytes), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForNeuronDeviceEccEventMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronDeviceHwEccEvents).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronDeviceHwEccEvents: metricsList.At(0), + "node_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + 
"node_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_total": createExpectedMetric("node_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_total": createExpectedMetric("pod_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), 
+ "container_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("container_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("container_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("container_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("container_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_total": createExpectedMetric("container_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForNeuronDeviceEccEventMetric_PodNameMissing(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + removeAttributefromMetric(createActualMetricForKey(NeuronDeviceHwEccEvents), PodName).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + 
NeuronDeviceHwEccEvents: metricsList.At(0), + "node_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_total": createExpectedMetric("node_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricModifierForNonNeuronMonitorMetric(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NonNeuronMetric).CopyTo(metricsList.AppendEmpty()) + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NonNeuronMetric: metricsList.At(0), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestListWithMultipleMetrics(t *testing.T) { + metricModifier := 
setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronExecutionLatency).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NeuronExecutionErrors).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NeuronExecutionStatus).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NeuronCoreMemoryUsageModelSharedScratchpad).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NeuronDeviceRuntimeMemoryUsedBytes).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NeuronDeviceHwEccEvents).CopyTo(metricsList.AppendEmpty()) + createActualMetricForKey(NonNeuronMetric).CopyTo(metricsList.AppendEmpty()) + + for i := 0; i < metricsList.Len(); i++ { + metricModifier.ModifyMetric(metricsList.At(i), metricsList) + } + + expectedMetrics := map[string]pmetric.Metric{ + NeuronExecutionLatency: metricsList.At(0), + NeuronExecutionErrors: metricsList.At(1), + NeuronExecutionStatus: metricsList.At(2), + NeuronCoreMemoryUsageModelSharedScratchpad: metricsList.At(3), + NeuronDeviceRuntimeMemoryUsedBytes: metricsList.At(4), + NeuronDeviceHwEccEvents: metricsList.At(5), + NonNeuronMetric: metricsList.At(6), + + "node_neuron_execution_latency": createExpectedMetric("node_neuron_execution_latency", false, []map[string]string{{Type: NodeAWSNeuron}}, []float64{1}, pmetric.MetricTypeSum, Seconds), + + "node_neuron_execution_errors_generic": createExpectedMetric("node_neuron_execution_errors_generic", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_numerical": createExpectedMetric("node_neuron_execution_errors_numerical", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_transient": createExpectedMetric("node_neuron_execution_errors_transient", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{3}, 
pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_model": createExpectedMetric("node_neuron_execution_errors_model", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_runtime": createExpectedMetric("node_neuron_execution_errors_runtime", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{5}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_hardware": createExpectedMetric("node_neuron_execution_errors_hardware", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{6}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_errors_total": createExpectedMetric("node_neuron_execution_errors_total", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{21}, pmetric.MetricTypeSum, Count), + + "node_neuron_execution_status_completed": createExpectedMetric("node_neuron_execution_status_completed", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_completed_with_err": createExpectedMetric("node_neuron_execution_status_completed_with_err", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_completed_with_num_err": createExpectedMetric("node_neuron_execution_status_completed_with_num_err", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_timed_out": createExpectedMetric("node_neuron_execution_status_timed_out", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_incorrect_input": createExpectedMetric("node_neuron_execution_status_incorrect_input", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, 
[]float64{5}, pmetric.MetricTypeSum, Count), + "node_neuron_execution_status_failed_to_queue": createExpectedMetric("node_neuron_execution_status_failed_to_queue", true, []map[string]string{{Type: NodeAWSNeuron, RuntimeTag: "1"}}, []float64{6}, pmetric.MetricTypeSum, Count), + + "node_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("node_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: NodeAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: NodeAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: NodeAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + "pod_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("pod_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: PodAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: PodAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: PodAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + "container_neuroncore_memory_usage_model_shared_scratchpad": createExpectedMetric("container_neuroncore_memory_usage_model_shared_scratchpad", false, []map[string]string{{NeuronCore: "core0", NeuronDevice: "device0", Type: ContainerAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core1", NeuronDevice: "device0", Type: ContainerAWSNeuronCore, PodName: DummyPod}, {NeuronCore: "core2", NeuronDevice: "device1", Type: ContainerAWSNeuronCore, PodName: DummyPod}}, []float64{1, 2, 3}, pmetric.MetricTypeSum, Bytes), + + "node_neurondevice_runtime_memory_used_bytes": createExpectedMetric("node_neurondevice_runtime_memory_used_bytes", false, []map[string]string{{Type: NodeAWSNeuron}}, []float64{2}, pmetric.MetricTypeSum, Bytes), + + 
"node_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("node_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "node_neurondevice_hw_ecc_events_total": createExpectedMetric("node_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: NodeAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, 
pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("pod_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "pod_neurondevice_hw_ecc_events_total": createExpectedMetric("pod_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: PodAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_mem_ecc_corrected": createExpectedMetric("container_neurondevice_hw_ecc_events_mem_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{1}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_mem_ecc_uncorrected": createExpectedMetric("container_neurondevice_hw_ecc_events_mem_ecc_uncorrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{2}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_sram_ecc_corrected": createExpectedMetric("container_neurondevice_hw_ecc_events_sram_ecc_corrected", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{3}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_sram_ecc_uncorrected": createExpectedMetric("container_neurondevice_hw_ecc_events_sram_ecc_uncorrected", false, []map[string]string{{NeuronDevice: 
"device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{4}, pmetric.MetricTypeSum, Count), + "container_neurondevice_hw_ecc_events_total": createExpectedMetric("container_neurondevice_hw_ecc_events_total", false, []map[string]string{{NeuronDevice: "device1", PodName: DummyPod, Type: ContainerAWSNeuronDevice, RuntimeTag: "1"}}, []float64{10}, pmetric.MetricTypeSum, Count), + } + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func TestMetricWithStaleDatapoint(t *testing.T) { + metricModifier := setupMetricModifier() + metricsList := pmetric.NewMetricSlice() + createActualMetricForKey(NeuronExecutionLatency).CopyTo(metricsList.AppendEmpty()) + + originalMetricDatapoint := metricsList.At(0).Gauge().DataPoints().At(0) + originalMetricDatapoint.SetFlags(originalMetricDatapoint.Flags().WithNoRecordedValue(true)) + + metricModifier.ModifyMetric(metricsList.At(0), metricsList) + + expectedMetrics := map[string]pmetric.Metric{ + NeuronExecutionLatency: metricsList.At(0), + "node_neuron_execution_latency": createExpectedMetric("node_neuron_execution_latency", false, []map[string]string{{Type: NodeAWSNeuron}}, []float64{1}, pmetric.MetricTypeSum, Seconds), + } + + assertModifiedMetric(t, metricsList, expectedMetrics) +} + +func createActualMetricForKey(key string) pmetric.Metric { + metricDefinition := metricNameToMetricLayout[key] + + metric := pmetric.NewMetric() + metric.SetName(key) + metric.SetUnit(metricDefinition.Unit) + datapoints := pmetric.NumberDataPointSlice{} + if metricDefinition.MetricType == pmetric.MetricTypeGauge { + datapoints = metric.SetEmptyGauge().DataPoints() + } else { + datapoints = metric.SetEmptySum().DataPoints() + } + + for i := 0; i < len(metricDefinition.MetricValues); i++ { + datapoint := datapoints.AppendEmpty() + datapoint.SetDoubleValue(metricDefinition.MetricValues[i]) + datapoint.SetTimestamp(staticTimestamp) + datapoint.Attributes().FromRaw(staticAttributes) + + if 
len(metricDefinition.SpecialAttributes) > 0 { + for j := 0; j < len(metricDefinition.SpecialAttributes[i])-1; j = j + 2 { + datapoint.Attributes().PutStr(metricDefinition.SpecialAttributes[i][j], metricDefinition.SpecialAttributes[i][j+1]) + } + } + } + + return metric +} + +func assertModifiedMetric(t *testing.T, actualSlice pmetric.MetricSlice, expectedMetrics map[string]pmetric.Metric) { + assert.Equal(t, len(expectedMetrics), actualSlice.Len()) + for i := 0; i < actualSlice.Len(); i++ { + actualMetric := actualSlice.At(i) + expectedMetric, exists := expectedMetrics[actualMetric.Name()] + + assert.True(t, exists) + assert.Equal(t, expectedMetric.Name(), actualMetric.Name()) + assert.Equal(t, expectedMetric.Type(), actualMetric.Type()) + assert.Equal(t, expectedMetric.Unit(), actualMetric.Unit()) + + actualDatapoints := pmetric.NumberDataPointSlice{} + expectedDatapoints := pmetric.NumberDataPointSlice{} + if actualMetric.Type() == pmetric.MetricTypeGauge { + actualDatapoints = actualMetric.Gauge().DataPoints() + expectedDatapoints = expectedMetric.Gauge().DataPoints() + } else { + actualDatapoints = actualMetric.Sum().DataPoints() + expectedDatapoints = expectedMetric.Sum().DataPoints() + } + + assert.Equal(t, expectedDatapoints.Len(), actualDatapoints.Len()) + + for j := 0; j < actualDatapoints.Len(); j++ { + actualDatapoint := actualDatapoints.At(j) + expectedDatapoint := expectedDatapoints.At(j) + + assert.Equal(t, expectedDatapoint.Attributes().Len(), actualDatapoint.Attributes().Len()) + for key, val := range actualDatapoint.Attributes().AsRaw() { + expectedVal, _ := expectedDatapoint.Attributes().Get(key) + assert.Equal(t, expectedVal.AsString(), val) + } + + assert.Equal(t, expectedDatapoint.ValueType(), actualDatapoint.ValueType()) + assert.Equal(t, expectedDatapoint.DoubleValue(), actualDatapoint.DoubleValue()) + assert.Equal(t, expectedDatapoint.Timestamp(), actualDatapoint.Timestamp()) + assert.False(t, actualDatapoint.Flags().NoRecordedValue()) + 
assert.NotEqual(t, pmetric.NumberDataPointValueTypeEmpty, actualDatapoint.ValueType()) + } + } +} + +func createExpectedMetric(name string, isCumulative bool, attributes []map[string]string, values []float64, metricType pmetric.MetricType, unit string) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.SetUnit(unit) + + datapoints := metric.SetEmptySum().DataPoints() + if metricType == pmetric.MetricTypeGauge { + datapoints = metric.SetEmptyGauge().DataPoints() + } + + if isCumulative { + metric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } + + for i := 0; i < len(values); i++ { + datapoint := datapoints.AppendEmpty() + datapoint.SetTimestamp(staticTimestamp) + datapoint.SetDoubleValue(values[i]) + datapoint.Attributes().FromRaw(staticAttributes) + + for key, val := range attributes[i] { + datapoint.Attributes().PutStr(key, val) + } + } + + return metric +} + +func removeAttributefromMetric(metric pmetric.Metric, key string) pmetric.Metric { + datapoints := pmetric.NewNumberDataPointSlice() + if metric.Type() == pmetric.MetricTypeGauge { + datapoints = metric.Gauge().DataPoints() + } else { + datapoints = metric.Sum().DataPoints() + } + + for i := 0; i < datapoints.Len(); i++ { + datapoints.At(i).Attributes().Remove(key) + } + return metric +} diff --git a/plugins/processors/gpuattributes/processor.go b/plugins/processors/gpuattributes/processor.go new file mode 100644 index 0000000000..c62dc7e6b0 --- /dev/null +++ b/plugins/processors/gpuattributes/processor.go @@ -0,0 +1,252 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "context" + "encoding/json" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/internal/containerinsightscommon" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/gpuattributes/internal" +) + +const ( + gpuMetricIdentifier = "_gpu_" + gpuContainerMetricPrefix = "container_" + gpuPodMetricPrefix = "pod_" + gpuNodeMetricPrefix = "node_" +) + +// schemas at each resource level +// - Container Schema +// - ClusterName +// - ClusterName, Namespace, PodName, ContainerName +// - ClusterName, Namespace, PodName, FullPodName, ContainerName +// - ClusterName, Namespace, PodName, FullPodName, ContainerName, GpuDevice +// +// - Pod +// - ClusterName +// - ClusterName, Namespace +// - ClusterName, Namespace, Service +// - ClusterName, Namespace, PodName +// - ClusterName, Namespace, PodName, FullPodName +// - ClusterName, Namespace, PodName, FullPodName, GpuDevice +// +// - Node +// - ClusterName +// - ClusterName, InstanceIdKey, NodeName +// - ClusterName, InstanceIdKey, NodeName, GpuDevice +var containerLabelFilter = map[string]map[string]interface{}{ + containerinsightscommon.ClusterNameKey: nil, + containerinsightscommon.InstanceIdKey: nil, + containerinsightscommon.GpuDeviceKey: nil, + containerinsightscommon.MetricType: nil, + containerinsightscommon.NodeNameKey: nil, + containerinsightscommon.K8sNamespace: nil, + containerinsightscommon.FullPodNameKey: nil, + containerinsightscommon.PodNameKey: nil, + containerinsightscommon.TypeService: nil, + containerinsightscommon.GpuUniqueId: nil, + containerinsightscommon.ContainerNamekey: nil, + containerinsightscommon.InstanceTypeKey: nil, + containerinsightscommon.VersionKey: nil, + containerinsightscommon.SourcesKey: nil, + containerinsightscommon.Timestamp: nil, + containerinsightscommon.K8sKey: { + 
containerinsightscommon.HostKey: nil, + "labels": nil, + "pod_id": nil, + "pod_name": nil, + "pod_owners": nil, + "namespace": nil, + "container_name": nil, + "containerd": nil, + }, +} +var podLabelFilter = map[string]map[string]interface{}{ + containerinsightscommon.ClusterNameKey: nil, + containerinsightscommon.InstanceIdKey: nil, + containerinsightscommon.GpuDeviceKey: nil, + containerinsightscommon.MetricType: nil, + containerinsightscommon.NodeNameKey: nil, + containerinsightscommon.K8sNamespace: nil, + containerinsightscommon.FullPodNameKey: nil, + containerinsightscommon.PodNameKey: nil, + containerinsightscommon.TypeService: nil, + containerinsightscommon.GpuUniqueId: nil, + containerinsightscommon.InstanceTypeKey: nil, + containerinsightscommon.VersionKey: nil, + containerinsightscommon.SourcesKey: nil, + containerinsightscommon.Timestamp: nil, + containerinsightscommon.K8sKey: { + containerinsightscommon.HostKey: nil, + "labels": nil, + "pod_id": nil, + "pod_name": nil, + "pod_owners": nil, + "namespace": nil, + }, +} +var nodeLabelFilter = map[string]map[string]interface{}{ + containerinsightscommon.ClusterNameKey: nil, + containerinsightscommon.InstanceIdKey: nil, + containerinsightscommon.GpuDeviceKey: nil, + containerinsightscommon.MetricType: nil, + containerinsightscommon.NodeNameKey: nil, + containerinsightscommon.InstanceTypeKey: nil, + containerinsightscommon.VersionKey: nil, + containerinsightscommon.SourcesKey: nil, + containerinsightscommon.Timestamp: nil, + containerinsightscommon.K8sKey: { + containerinsightscommon.HostKey: nil, + }, +} + +type gpuAttributesProcessor struct { + *Config + logger *zap.Logger + awsNeuronMetricModifier *internal.AwsNeuronMetricModifier + awsNeuronMemoryMetricAggregator *internal.AwsNeuronMemoryMetricsAggregator +} + +func newGpuAttributesProcessor(config *Config, logger *zap.Logger) *gpuAttributesProcessor { + d := &gpuAttributesProcessor{ + Config: config, + logger: logger, + awsNeuronMetricModifier: 
internal.NewMetricModifier(logger), + awsNeuronMemoryMetricAggregator: internal.NewMemoryMemoryAggregator(), + } + return d +} + +func (d *gpuAttributesProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rs := rms.At(i) + ilms := rs.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + ils := ilms.At(j) + metrics := ils.Metrics() + + d.filterGpuMetricsWithoutPodName(metrics) + + metricsLength := metrics.Len() + for k := 0; k < metricsLength; k++ { + m := metrics.At(k) + d.processGPUMetricAttributes(m) + d.awsNeuronMemoryMetricAggregator.AggregateMemoryMetric(m) + // non neuron metric is returned as a singleton list + d.awsNeuronMetricModifier.ModifyMetric(m, metrics) + } + if d.awsNeuronMemoryMetricAggregator.MemoryMetricsFound { + aggregatedMemoryMetric := d.awsNeuronMemoryMetricAggregator.FlushAggregatedMemoryMetric() + d.awsNeuronMetricModifier.ModifyMetric(aggregatedMemoryMetric, metrics) + } + } + } + return md, nil +} + +func (d *gpuAttributesProcessor) processGPUMetricAttributes(m pmetric.Metric) { + // only decorate GPU metrics + if !strings.Contains(m.Name(), gpuMetricIdentifier) { + return + } + + labelFilter := map[string]map[string]interface{}{} + if strings.HasPrefix(m.Name(), gpuContainerMetricPrefix) { + labelFilter = containerLabelFilter + } else if strings.HasPrefix(m.Name(), gpuPodMetricPrefix) { + labelFilter = podLabelFilter + } else if strings.HasPrefix(m.Name(), gpuNodeMetricPrefix) { + labelFilter = nodeLabelFilter + } + + var dps pmetric.NumberDataPointSlice + switch m.Type() { + case pmetric.MetricTypeGauge: + dps = m.Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = m.Sum().DataPoints() + default: + d.logger.Debug("Ignore unknown metric type", zap.String(containerinsightscommon.MetricType, m.Type().String())) + } + + for i := 0; i < dps.Len(); i++ { + d.filterAttributes(dps.At(i).Attributes(), labelFilter) + } +} + +func (d 
*gpuAttributesProcessor) filterAttributes(attributes pcommon.Map, labels map[string]map[string]interface{}) { + if len(labels) == 0 { + return + } + // remove labels that are not in the keep list + attributes.RemoveIf(func(k string, _ pcommon.Value) bool { + if _, ok := labels[k]; ok { + return false + } + return true + }) + + // if a label has child level filter list, that means the label is map type + // only handles map type since there are currently only map and value types with GPU + for lk, ls := range labels { + if len(ls) == 0 { + continue + } + if av, ok := attributes.Get(lk); ok { + // decode json formatted string value into a map then encode again after filtering elements + var blob map[string]json.RawMessage + strVal := av.Str() + err := json.Unmarshal([]byte(strVal), &blob) + if err != nil { + d.logger.Warn("gpuAttributesProcessor: failed to unmarshal label", zap.String("label", lk)) + continue + } + newBlob := make(map[string]json.RawMessage) + for bkey, bval := range blob { + if _, ok := ls[bkey]; ok { + newBlob[bkey] = bval + } + } + bytes, err := json.Marshal(newBlob) + if err != nil { + d.logger.Warn("gpuAttributesProcessor: failed to marshall label", zap.String("label", lk)) + continue + } + attributes.PutStr(lk, string(bytes)) + } + } +} + +// remove dcgm metrics that do not contain PodName attribute which means there is no workload associated to container/pod +func (d *gpuAttributesProcessor) filterGpuMetricsWithoutPodName(metrics pmetric.MetricSlice) { + metrics.RemoveIf(func(m pmetric.Metric) bool { + isGpu := strings.Contains(m.Name(), gpuMetricIdentifier) + isContainerOrPod := strings.HasPrefix(m.Name(), gpuContainerMetricPrefix) || strings.HasPrefix(m.Name(), gpuPodMetricPrefix) + + if !isGpu || !isContainerOrPod { + return false + } + + var dps pmetric.NumberDataPointSlice + switch m.Type() { + case pmetric.MetricTypeGauge: + dps = m.Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = m.Sum().DataPoints() + default: + 
d.logger.Debug("Ignore unknown metric type", zap.String(containerinsightscommon.MetricType, m.Type().String())) + } + + _, hasPodInfo := dps.At(0).Attributes().Get(internal.PodName) + return !hasPodInfo + }) +} diff --git a/plugins/processors/gpuattributes/processor_test.go b/plugins/processors/gpuattributes/processor_test.go new file mode 100644 index 0000000000..02bbd02ad7 --- /dev/null +++ b/plugins/processors/gpuattributes/processor_test.go @@ -0,0 +1,142 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package gpuattributes + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +func TestProcessMetrics(t *testing.T) { + logger, _ := zap.NewDevelopment() + gp := newGpuAttributesProcessor(createDefaultConfig().(*Config), logger) + ctx := context.Background() + + testcases := map[string]struct { + resource string + metrics pmetric.Metrics + wantCnt int + want map[string]string + }{ + "nonNode": { + metrics: generateMetrics("prefix", map[string]string{ + "ClusterName": "cluster", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + }, + }, + "nodeDropSimple": { + metrics: generateMetrics("node", map[string]string{ + "ClusterName": "cluster", + "Drop": "val", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + }, + }, + "nodeDropJson": { + metrics: generateMetrics("node", map[string]string{ + "ClusterName": "cluster", + "kubernetes": "{\"host\":\"test\"}", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + "kubernetes": "{\"host\":\"test\"}", + }, + }, + "nodeDropMixed": { + metrics: generateMetrics("node", map[string]string{ + "ClusterName": "cluster", + "Drop": "val", + "kubernetes": "{\"host\":\"test\",\"b\":\"2\"}", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + "kubernetes": 
"{\"host\":\"test\"}", + }, + }, + "dropPodWithoutPodName": { + metrics: generateMetrics("pod", map[string]string{ + "ClusterName": "cluster", + "kubernetes": "{\"host\":\"test\",\"b\":\"2\"}", + }), + wantCnt: 0, + want: map[string]string{}, + }, + "keepPodWithoutPodName": { + metrics: generateMetrics("pod", map[string]string{ + "ClusterName": "cluster", + "PodName": "pod", + "kubernetes": "{\"host\":\"test\",\"b\":\"2\"}", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + "PodName": "pod", + "kubernetes": "{\"host\":\"test\"}", + }, + }, + "dropContainerWithoutPodName": { + metrics: generateMetrics("container", map[string]string{ + "ClusterName": "cluster", + "kubernetes": "{\"host\":\"test\",\"b\":\"2\"}", + }), + wantCnt: 0, + want: map[string]string{}, + }, + "keepContainerWithoutPodName": { + metrics: generateMetrics("container", map[string]string{ + "ClusterName": "cluster", + "PodName": "pod", + "kubernetes": "{\"host\":\"test\",\"b\":\"2\"}", + }), + wantCnt: 1, + want: map[string]string{ + "ClusterName": "cluster", + "PodName": "pod", + "kubernetes": "{\"host\":\"test\"}", + }, + }, + } + + for tname, tc := range testcases { + fmt.Printf("running %s\n", tname) + ms, _ := gp.processMetrics(ctx, tc.metrics) + assert.Equal(t, tc.wantCnt, ms.MetricCount()) + if tc.wantCnt > 0 { + attrs := ms.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes() + assert.Equal(t, len(tc.want), attrs.Len()) + for k, v := range tc.want { + got, ok := attrs.Get(k) + assert.True(t, ok) + assert.Equal(t, v, got.Str()) + } + } + } +} + +func generateMetrics(prefix string, dimensions map[string]string) pmetric.Metrics { + md := pmetric.NewMetrics() + + m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + m.SetName(prefix + gpuMetricIdentifier) + gauge := m.SetEmptyGauge().DataPoints().AppendEmpty() + gauge.SetIntValue(10) + + for k, v := range dimensions { + 
gauge.Attributes().PutStr(k, v) + } + + return md +} diff --git a/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger.go b/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger.go index 66613547ab..d44a428a5e 100644 --- a/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger.go +++ b/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger.go @@ -28,7 +28,7 @@ var nodeMetricRules = []structuredlogscommon.MetricRule{ {Unit: Percent, Name: MetricName(TypeNode, MemReservedCapacity)}, {Unit: Count, Name: MetricName(TypeNode, RunningPodCount)}, {Unit: Count, Name: MetricName(TypeNode, RunningContainerCount)}}, - DimensionSets: [][]string{{NodeNameKey, InstanceId, ClusterNameKey}}, + DimensionSets: [][]string{{NodeNameKey, InstanceIdKey, ClusterNameKey}}, Namespace: cloudwatchNamespace, }, { @@ -80,7 +80,7 @@ var nodeFSMetricRules = []structuredlogscommon.MetricRule{ { Metrics: []structuredlogscommon.MetricAttr{ {Unit: Percent, Name: MetricName(TypeNodeFS, FSUtilization)}}, - DimensionSets: [][]string{{NodeNameKey, InstanceId, ClusterNameKey}, {ClusterNameKey}}, + DimensionSets: [][]string{{NodeNameKey, InstanceIdKey, ClusterNameKey}, {ClusterNameKey}}, Namespace: cloudwatchNamespace, }, } diff --git a/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger_test.go b/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger_test.go index df48517ca3..778eaf4325 100644 --- a/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger_test.go +++ b/plugins/processors/k8sdecorator/structuredlogsadapter/metricruletagger_test.go @@ -17,7 +17,7 @@ import ( ) func TestNodeFull(t *testing.T) { - tags := map[string]string{MetricType: TypeNode, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", InstanceId: "i-123"} + tags := map[string]string{MetricType: TypeNode, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", InstanceIdKey: "i-123"} fields := 
map[string]interface{}{MetricName(TypeNode, CpuUtilization): 0, MetricName(TypeNode, MemUtilization): 0, MetricName(TypeNode, NetTotalBytes): 0, MetricName(TypeNode, CpuReservedCapacity): 0, MetricName(TypeNode, MemReservedCapacity): 0, MetricName(TypeNode, RunningPodCount): 0, MetricName(TypeNode, RunningContainerCount): 0, MetricName(TypeNode, CpuTotal): 0, @@ -31,7 +31,7 @@ func TestNodeFull(t *testing.T) { } func TestNodeLackOfCpuUtilization(t *testing.T) { - tags := map[string]string{MetricType: TypeNode, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", InstanceId: "i-123"} + tags := map[string]string{MetricType: TypeNode, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", InstanceIdKey: "i-123"} fields := map[string]interface{}{MetricName(TypeNode, MemUtilization): 0, MetricName(TypeNode, NetTotalBytes): 0, MetricName(TypeNode, CpuReservedCapacity): 0, MetricName(TypeNode, MemReservedCapacity): 0, MetricName(TypeNode, RunningPodCount): 0, MetricName(TypeNode, RunningContainerCount): 0, MetricName(TypeNode, CpuTotal): 0, @@ -48,7 +48,7 @@ func TestNodeLackOfCpuUtilization(t *testing.T) { } func TestNodeLackOfNodeNameKey(t *testing.T) { - tags := map[string]string{MetricType: TypeNode, ClusterNameKey: "TestClusterName", InstanceId: "i-123"} + tags := map[string]string{MetricType: TypeNode, ClusterNameKey: "TestClusterName", InstanceIdKey: "i-123"} fields := map[string]interface{}{MetricName(TypeNode, CpuUtilization): 0, MetricName(TypeNode, MemUtilization): 0, MetricName(TypeNode, NetTotalBytes): 0, MetricName(TypeNode, CpuReservedCapacity): 0, MetricName(TypeNode, MemReservedCapacity): 0, MetricName(TypeNode, RunningPodCount): 0, MetricName(TypeNode, RunningContainerCount): 0, MetricName(TypeNode, CpuTotal): 0, @@ -91,7 +91,7 @@ func TestPodFullLackOfService(t *testing.T) { } func TestNodeFSFull(t *testing.T) { - tags := map[string]string{MetricType: TypeNodeFS, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", 
InstanceId: "i-123"} + tags := map[string]string{MetricType: TypeNodeFS, NodeNameKey: "TestNodeName", ClusterNameKey: "TestClusterName", InstanceIdKey: "i-123"} fields := map[string]interface{}{MetricName(TypeNodeFS, FSUtilization): 0} m := metric.New("test", tags, fields, time.Now()) TagMetricRule(m) diff --git a/receiver/adapter/config.go b/receiver/adapter/config.go index 2e8bc6d208..7c04528dfc 100644 --- a/receiver/adapter/config.go +++ b/receiver/adapter/config.go @@ -9,7 +9,7 @@ import ( ) type Config struct { - scraperhelper.ScraperControllerSettings `mapstructure:",squash"` + scraperhelper.ControllerConfig `mapstructure:",squash"` // The different name of the plugin, share the similar structure with https://github.com/influxdata/telegraf/pull/6207 AliasName string `mapstructure:"alias_name,omitempty"` diff --git a/receiver/adapter/factory.go b/receiver/adapter/factory.go index 943c23af1d..7840135d65 100644 --- a/receiver/adapter/factory.go +++ b/receiver/adapter/factory.go @@ -32,13 +32,14 @@ func NewAdapter(telegrafConfig *telegrafconfig.Config) Adapter { // Type joins the TelegrafPrefix to the input. 
func Type(input string) component.Type { - return component.Type(TelegrafPrefix + input) + newType, _ := component.NewType(TelegrafPrefix + input) + return newType } func createDefaultConfig() func() component.Config { return func() component.Config { return &Config{ - ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ControllerConfig: scraperhelper.ControllerConfig{ CollectionInterval: time.Minute, }, } @@ -53,7 +54,7 @@ func (a Adapter) NewReceiverFactory(telegrafInputName string) receiver.Factory { func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.CreateSettings, config component.Config, consumer consumer.Metrics) (receiver.Metrics, error) { cfg := config.(*Config) - input, err := a.initializeInput(string(settings.ID.Type()), settings.ID.Name()) + input, err := a.initializeInput(settings.ID.Type().String(), settings.ID.Name()) if err != nil { return nil, err @@ -62,7 +63,7 @@ func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.Cr rcvr := newAdaptedReceiver(input, ctx, consumer, settings.Logger) scraper, err := scraperhelper.NewScraper( - settings.ID.Name(), + settings.ID.Type().String(), rcvr.scrape, scraperhelper.WithStart(rcvr.start), scraperhelper.WithShutdown(rcvr.shutdown), @@ -73,7 +74,7 @@ func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.Cr } return scraperhelper.NewScraperControllerReceiver( - &cfg.ScraperControllerSettings, settings, consumer, + &cfg.ControllerConfig, settings, consumer, scraperhelper.AddScraper(scraper), ) } diff --git a/receiver/adapter/factory_test.go b/receiver/adapter/factory_test.go index 1d8e9a3416..7510d316d7 100644 --- a/receiver/adapter/factory_test.go +++ b/receiver/adapter/factory_test.go @@ -29,7 +29,8 @@ func Test_Type(t *testing.T) { adapter := NewAdapter(c) factory := adapter.NewReceiverFactory("cpu") ft := factory.Type() - as.Equal(component.Type("telegraf_cpu"), ft) + telegrafCPUType, _ := 
component.NewType("telegraf_cpu") + as.Equal(telegrafCPUType, ft) } func Test_ValidConfig(t *testing.T) { @@ -61,13 +62,13 @@ func Test_CreateMetricsReceiver(t *testing.T) { factory := adapter.NewReceiverFactory("cpu") set := receivertest.NewNopCreateSettings() - set.ID = component.NewIDWithName(factory.Type(), "") + set.ID = component.NewIDWithName(factory.Type(), "cpu") metricsReceiver, err := factory.CreateMetricsReceiver( context.Background(), set, &Config{ - ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ControllerConfig: scraperhelper.ControllerConfig{ CollectionInterval: time.Minute, }, }, @@ -93,7 +94,7 @@ func Test_CreateInvalidMetricsReceiver(t *testing.T) { context.Background(), receivertest.NewNopCreateSettings(), &Config{ - ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ControllerConfig: scraperhelper.ControllerConfig{ CollectionInterval: time.Minute, }, }, diff --git a/receiver/adapter/testdata/cpu_plugin.toml b/receiver/adapter/testdata/cpu_plugin.toml index 3c3e06f1c9..5832b93cf6 100644 --- a/receiver/adapter/testdata/cpu_plugin.toml +++ b/receiver/adapter/testdata/cpu_plugin.toml @@ -1,4 +1,5 @@ [[inputs.cpu]] fieldpass = ["usage_idle", "usage_iowait", "usage_user", "usage_system"] percpu = false - totalcpu = false \ No newline at end of file + totalcpu = false + alias = "cpu" \ No newline at end of file diff --git a/service/configprovider/provider.go b/service/configprovider/provider.go index 122890d5e7..9fd63484df 100644 --- a/service/configprovider/provider.go +++ b/service/configprovider/provider.go @@ -11,11 +11,11 @@ import ( ) func Get(configPath string) (otelcol.ConfigProvider, error) { - fprovider := fileprovider.New() + fprovider := fileprovider.NewWithSettings(confmap.ProviderSettings{}) settings := otelcol.ConfigProviderSettings{ ResolverSettings: confmap.ResolverSettings{ URIs: []string{configPath}, - Converters: []confmap.Converter{expandconverter.New()}, + Converters: 
[]confmap.Converter{expandconverter.New(confmap.ConverterSettings{})}, Providers: map[string]confmap.Provider{ fprovider.Scheme(): fprovider, }, diff --git a/service/configprovider/provider_test.go b/service/configprovider/provider_test.go index 1a7088c99e..e3b2ddb2c9 100644 --- a/service/configprovider/provider_test.go +++ b/service/configprovider/provider_test.go @@ -31,7 +31,8 @@ func TestConfigProvider(t *testing.T) { assert.NoError(t, err) actualCfg, err := actualProvider.Get(context.Background(), factories) assert.NoError(t, err) - got, ok := actualCfg.Exporters[component.NewIDWithName("awscloudwatchlogs", "emf_logs")] + cloudwatchType, _ := component.NewType("awscloudwatchlogs") + got, ok := actualCfg.Exporters[component.NewIDWithName(cloudwatchType, "emf_logs")] require.True(t, ok) gotCfg, ok := got.(*awscloudwatchlogsexporter.Config) require.True(t, ok) diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 602371fa40..d2c314d5a6 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -27,8 +27,9 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/gpuattributes" ) func Factories() (otelcol.Factories, error) { @@ -46,13 +47,14 @@ func Factories() (otelcol.Factories, error) { } if factories.Processors, err = processor.MakeFactoryMap( - awsappsignals.NewFactory(), + awsapplicationsignals.NewFactory(), batchprocessor.NewFactory(), cumulativetodeltaprocessor.NewFactory(), ec2tagger.NewFactory(), metricstransformprocessor.NewFactory(), resourcedetectionprocessor.NewFactory(), 
transformprocessor.NewFactory(), + gpuattributes.NewFactory(), ); err != nil { return otelcol.Factories{}, err } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 17369775b4..6c5f3b04c7 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -7,11 +7,12 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" ) const ( receiversCount = 5 - processorCount = 7 + processorCount = 8 exportersCount = 5 extensionsCount = 2 ) @@ -21,31 +22,50 @@ func TestComponents(t *testing.T) { assert.NoError(t, err) receivers := factories.Receivers assert.Len(t, receivers, receiversCount) - assert.NotNil(t, receivers["awscontainerinsightreceiver"]) - assert.NotNil(t, receivers["awsxray"]) - assert.NotNil(t, receivers["otlp"]) - assert.NotNil(t, receivers["tcplog"]) - assert.NotNil(t, receivers["udplog"]) + awscontainerinsightreceiverType, _ := component.NewType("awscontainerinsightreceiver") + awsxrayType, _ := component.NewType("awsxray") + otlpType, _ := component.NewType("otlp") + tcplogType, _ := component.NewType("tcplog") + udplogType, _ := component.NewType("udplog") + assert.NotNil(t, receivers[awscontainerinsightreceiverType]) + assert.NotNil(t, receivers[awsxrayType]) + assert.NotNil(t, receivers[otlpType]) + assert.NotNil(t, receivers[tcplogType]) + assert.NotNil(t, receivers[udplogType]) processors := factories.Processors assert.Len(t, processors, processorCount) - assert.NotNil(t, processors["awsappsignals"]) - assert.NotNil(t, processors["batch"]) - assert.NotNil(t, processors["cumulativetodelta"]) - assert.NotNil(t, processors["ec2tagger"]) - assert.NotNil(t, processors["metricstransform"]) - assert.NotNil(t, processors["transform"]) + awsapplicationsignalsType, _ := component.NewType("awsapplicationsignals") + batchType, _ := component.NewType("batch") + cumulativetodeltaType, _ := 
component.NewType("cumulativetodelta") + ec2taggerType, _ := component.NewType("ec2tagger") + metricstransformType, _ := component.NewType("metricstransform") + transformType, _ := component.NewType("transform") + gpuattributesType, _ := component.NewType("gpuattributes") + assert.NotNil(t, processors[awsapplicationsignalsType]) + assert.NotNil(t, processors[batchType]) + assert.NotNil(t, processors[cumulativetodeltaType]) + assert.NotNil(t, processors[ec2taggerType]) + assert.NotNil(t, processors[metricstransformType]) + assert.NotNil(t, processors[transformType]) + assert.NotNil(t, processors[gpuattributesType]) exporters := factories.Exporters assert.Len(t, exporters, exportersCount) - assert.NotNil(t, exporters["awscloudwatchlogs"]) - assert.NotNil(t, exporters["awsemf"]) - assert.NotNil(t, exporters["awsxray"]) - assert.NotNil(t, exporters["awscloudwatch"]) - assert.NotNil(t, exporters["logging"]) + awscloudwatchlogsType, _ := component.NewType("awscloudwatchlogs") + awsemfType, _ := component.NewType("awsemf") + awscloudwatchType, _ := component.NewType("awscloudwatch") + loggingType, _ := component.NewType("logging") + assert.NotNil(t, exporters[awscloudwatchlogsType]) + assert.NotNil(t, exporters[awsemfType]) + assert.NotNil(t, exporters[awsemfType]) + assert.NotNil(t, exporters[awscloudwatchType]) + assert.NotNil(t, exporters[loggingType]) extensions := factories.Extensions assert.Len(t, extensions, extensionsCount) - assert.NotNil(t, extensions["agenthealth"]) - assert.NotNil(t, extensions["awsproxy"]) + agenthealthType, _ := component.NewType("agenthealth") + awsproxyType, _ := component.NewType("awsproxy") + assert.NotNil(t, extensions[agenthealthType]) + assert.NotNil(t, extensions[awsproxyType]) } diff --git a/service/registry/registry_test.go b/service/registry/registry_test.go index 0a8f7317c0..2458d23c25 100644 --- a/service/registry/registry_test.go +++ b/service/registry/registry_test.go @@ -7,6 +7,7 @@ import ( "testing" 
"github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/extension/extensiontest" "go.opentelemetry.io/collector/otelcol" @@ -22,17 +23,18 @@ func TestRegistry(t *testing.T) { for _, apply := range Options() { apply(&got) } - assert.NotNil(t, got.Receivers["nop"]) - assert.NotNil(t, got.Processors["nop"]) - assert.NotNil(t, got.Exporters["nop"]) - assert.NotNil(t, got.Extensions["nop"]) + nop, _ := component.NewType("nop") + assert.NotNil(t, got.Receivers[nop]) + assert.NotNil(t, got.Processors[nop]) + assert.NotNil(t, got.Exporters[nop]) + assert.NotNil(t, got.Extensions[nop]) assert.Len(t, got.Receivers, 1) - origReceiver := got.Receivers["nop"] + origReceiver := got.Receivers[nop] Register(WithReceiver(receivertest.NewNopFactory())) for _, apply := range Options() { apply(&got) } - newReceiver := got.Receivers["nop"] + newReceiver := got.Receivers[nop] assert.NotEqual(t, origReceiver, newReceiver) assert.Len(t, got.Receivers, 1) Reset() diff --git a/tool/clean/clean_ebs/clean_ebs.go b/tool/clean/clean_ebs/clean_ebs.go new file mode 100644 index 0000000000..195edb0983 --- /dev/null +++ b/tool/clean/clean_ebs/clean_ebs.go @@ -0,0 +1,68 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package main + +import ( + "context" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + + "github.com/aws/amazon-cloudwatch-agent/tool/clean" +) + +// Clean ebs volumes if they have been open longer than 7 day and unused +func main() { + err := cleanVolumes() + if err != nil { + log.Fatalf("errors cleaning %v", err) + } +} + +func cleanVolumes() error { + log.Print("Begin to clean EBS Volumes") + ctx := context.Background() + defaultConfig, err := config.LoadDefaultConfig(ctx) + if err != nil { + return err + } + ec2Client := ec2.NewFromConfig(defaultConfig) + + return deleteUnusedVolumes(ctx, ec2Client) + +} + +func deleteUnusedVolumes(ctx context.Context, client *ec2.Client) error { + + input := &ec2.DescribeVolumesInput{ + Filters: []types.Filter{ + { + //if the status is availble, then EBS volume is not currently attached to any ec2 instance (so not being used) + Name: aws.String("status"), + Values: []string{"available"}, + }, + }, + } + + volumes, err := client.DescribeVolumes(ctx, input) + if err != nil { + return err + } + for _, volume := range volumes.Volumes { + if time.Since(*volume.CreateTime) > clean.KeepDurationOneWeek && len(volume.Attachments) == 0 { + log.Printf("Deleting unused volume %s", *volume.VolumeId) + _, err = client.DeleteVolume(ctx, &ec2.DeleteVolumeInput{ + VolumeId: volume.VolumeId, + }) + } + if err != nil { + log.Printf("Error deleting volume %s: %v", *volume.VolumeId, err) + } + } + return nil +} diff --git a/tool/clean/clean_eks/clean_eks.go b/tool/clean/clean_eks/clean_eks.go index 2e02bf18e8..89f8b6a75d 100644 --- a/tool/clean/clean_eks/clean_eks.go +++ b/tool/clean/clean_eks/clean_eks.go @@ -16,6 +16,17 @@ import ( "github.com/aws/amazon-cloudwatch-agent/tool/clean" ) +// make this configurable or construct based on some configs? 
+const betaEksEndpoint = "https://api.beta.us-west-2.wesley.amazonaws.com" + +var ( + ClustersToClean = []string{ + "cwagent-eks-integ-", + "cwagent-operator-helm-integ-", + "cwagent-operator-eks-integ-", + } +) + // Clean eks clusters if they have been open longer than 7 day func main() { err := cleanCluster() @@ -32,14 +43,43 @@ func cleanCluster() error { return err } eksClient := eks.NewFromConfig(defaultConfig) - terminateClusters(ctx, eksClient) + + // delete beta clusters + betaConfig, err := config.LoadDefaultConfig(ctx, config.WithEndpointResolverWithOptions(eksBetaEndpointResolver())) + if err != nil { + return err + } + betaClient := eks.NewFromConfig(betaConfig) + terminateClusters(ctx, betaClient) + return nil } +func eksBetaEndpointResolver() aws.EndpointResolverWithOptionsFunc { + return func(service, region string, options ...interface{}) (aws.Endpoint, error) { + endpoint, err := eks.NewDefaultEndpointResolver().ResolveEndpoint(region, eks.EndpointResolverOptions{}) + if err != nil { + return aws.Endpoint{}, err + } + endpoint.URL = betaEksEndpoint + return endpoint, nil + } +} + +func clusterNameMatchesClustersToClean(clusterName string, clustersToClean []string) bool { + for _, clusterToClean := range clustersToClean { + if strings.HasPrefix(clusterName, clusterToClean) { + return true + } + } + return false +} + func terminateClusters(ctx context.Context, client *eks.Client) { listClusterInput := eks.ListClustersInput{} - expirationDateCluster := time.Now().UTC().Add(clean.KeepDurationOneWeek) + expirationDateCluster := time.Now().UTC().Add(clean.KeepDurationFourDays) + clusters, err := client.ListClusters(ctx, &listClusterInput) if err != nil { log.Fatalf("could not get cluster list") @@ -50,33 +90,39 @@ func terminateClusters(ctx context.Context, client *eks.Client) { if err != nil { return } - if expirationDateCluster.After(*describeClusterOutput.Cluster.CreatedAt) && strings.HasPrefix(*describeClusterOutput.Cluster.Name, 
"cwagent-eks-integ-") { - log.Printf("Try to delete cluster %s launch-date %s", cluster, *describeClusterOutput.Cluster.CreatedAt) - describeNodegroupInput := eks.ListNodegroupsInput{ClusterName: aws.String(cluster)} - nodeGroupOutput, err := client.ListNodegroups(ctx, &describeNodegroupInput) - if err != nil { - log.Printf("could not query node groups cluster %s err %v", cluster, err) - } - // it takes about 5 minutes to delete node groups - // it will fail to delete cluster if we need to delete node groups - // this will delete the cluster on next run the next day - // I do not want to wait for node groups to be deleted - // as it will increase the runtime of this cleaner - for _, nodegroup := range nodeGroupOutput.Nodegroups { - deleteNodegroupInput := eks.DeleteNodegroupInput{ - ClusterName: aws.String(cluster), - NodegroupName: aws.String(nodegroup), - } - _, err := client.DeleteNodegroup(ctx, &deleteNodegroupInput) - if err != nil { - log.Printf("could delete node groups %s cluster %s err %v", nodegroup, cluster, err) - } + if !expirationDateCluster.After(*describeClusterOutput.Cluster.CreatedAt) { + log.Printf("Ignoring cluster %s with a launch-date %s since it was created in the last %s", cluster, *describeClusterOutput.Cluster.CreatedAt, clean.KeepDurationFourDays) + continue + } + if !clusterNameMatchesClustersToClean(*describeClusterOutput.Cluster.Name, ClustersToClean) { + log.Printf("Ignoring cluster %s since it doesnt match any of the clean regexes", cluster) + continue + } + log.Printf("Try to delete cluster %s launch-date %s", cluster, *describeClusterOutput.Cluster.CreatedAt) + describeNodegroupInput := eks.ListNodegroupsInput{ClusterName: aws.String(cluster)} + nodeGroupOutput, err := client.ListNodegroups(ctx, &describeNodegroupInput) + if err != nil { + log.Printf("could not query node groups cluster %s err %v", cluster, err) + } + // it takes about 5 minutes to delete node groups + // it will fail to delete cluster if we need to delete node 
groups + // this will delete the cluster on next run the next day + // I do not want to wait for node groups to be deleted + // as it will increase the runtime of this cleaner + for _, nodegroup := range nodeGroupOutput.Nodegroups { + deleteNodegroupInput := eks.DeleteNodegroupInput{ + ClusterName: aws.String(cluster), + NodegroupName: aws.String(nodegroup), } - deleteClusterInput := eks.DeleteClusterInput{Name: aws.String(cluster)} - _, err = client.DeleteCluster(ctx, &deleteClusterInput) + _, err := client.DeleteNodegroup(ctx, &deleteNodegroupInput) if err != nil { - log.Printf("could not delete cluster %s err %v", cluster, err) + log.Printf("could not delete node groups %s cluster %s err %v", nodegroup, cluster, err) } } + deleteClusterInput := eks.DeleteClusterInput{Name: aws.String(cluster)} + _, err = client.DeleteCluster(ctx, &deleteClusterInput) + if err != nil { + log.Printf("could not delete cluster %s err %v", cluster, err) + } } } diff --git a/tool/clean/clean_host/clean_host.go b/tool/clean/clean_host/clean_host.go index e62e3c2c1b..31a7d9b5d4 100644 --- a/tool/clean/clean_host/clean_host.go +++ b/tool/clean/clean_host/clean_host.go @@ -1,14 +1,11 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT - -//go:build clean -// +build clean - package main import ( "context" "log" + "os" "time" "github.com/aws/aws-sdk-go-v2/config" @@ -31,7 +28,7 @@ func cleanHost() error { log.Print("Begin to clean EC2 Host") cxt := context.Background() - defaultConfig, err := config.LoadDefaultConfig(cxt) + defaultConfig, err := config.LoadDefaultConfig(cxt, config.WithRegion(os.Args[1])) if err != nil { return err } @@ -57,6 +54,7 @@ func terminateInstances(cxt context.Context, ec2client *ec2.Client) { "cwagent-performance-*", "cwagent-stress-*", "LocalStackIntegrationTestInstance", + "NvidiaDataCollector-*", }} instanceInput := ec2.DescribeInstancesInput{ diff --git a/tool/clean/clean_util.go b/tool/clean/clean_util.go index ab0de2cde6..903907e342 100644 --- a/tool/clean/clean_util.go +++ b/tool/clean/clean_util.go @@ -7,6 +7,7 @@ import "time" const ( KeepDurationOneWeek = KeepDurationOneDay * 7 + KeepDurationFourDays = KeepDurationOneDay * 4 KeepDurationOneDay = -1 * time.Hour * 24 KeepDurationSixtyDay = KeepDurationOneDay * time.Duration(60) KeepDurationTwentySixHours = KeepDurationOneDay + time.Hour*2 diff --git a/tool/paths/paths.go b/tool/paths/paths.go index 26543934fb..dc612bdf39 100644 --- a/tool/paths/paths.go +++ b/tool/paths/paths.go @@ -10,8 +10,6 @@ const ( YAML = "amazon-cloudwatch-agent.yaml" ENV = "env-config.json" AGENT_LOG_FILE = "amazon-cloudwatch-agent.log" - //TODO this CONFIG_DIR_IN_CONTAINER should change to something indicate dir, keep it for now to avoid break testing - CONFIG_DIR_IN_CONTAINER = "/etc/cwagentconfig" ) var ( diff --git a/tool/paths/paths_unix.go b/tool/paths/paths_unix.go index c59c70232a..a99a39a4a9 100644 --- a/tool/paths/paths_unix.go +++ b/tool/paths/paths_unix.go @@ -16,6 +16,8 @@ const ( AgentBinaryName = "amazon-cloudwatch-agent" WizardBinaryName = "amazon-cloudwatch-agent-config-wizard" AgentStartName = "amazon-cloudwatch-agent-ctl" + //TODO this CONFIG_DIR_IN_CONTAINER should change to something 
indicate dir, keep it for now to avoid break testing + CONFIG_DIR_IN_CONTAINER = "/etc/cwagentconfig" ) func init() { diff --git a/tool/paths/paths_windows.go b/tool/paths/paths_windows.go index 69c51ac889..2d8eca162b 100644 --- a/tool/paths/paths_windows.go +++ b/tool/paths/paths_windows.go @@ -9,6 +9,8 @@ package paths import ( "os" "path/filepath" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" ) const ( @@ -21,6 +23,8 @@ const ( AgentStartName = "amazon-cloudwatch-agent-ctl.ps1" ) +var CONFIG_DIR_IN_CONTAINER = filepath.Join(os.Getenv("ProgramFiles"), AgentDir, "cwagentconfig") + func init() { programFiles := os.Getenv("ProgramFiles") var programData string @@ -31,6 +35,12 @@ func init() { programData = filepath.Join(os.Getenv("ALLUSERSPROFILE"), "Application Data") } + if envconfig.IsWindowsHostProcessContainer() { + CONFIG_DIR_IN_CONTAINER = filepath.Join(os.Getenv("CONTAINER_SANDBOX_MOUNT_POINT"), "Program Files", AgentDir, "cwagentconfig", "cwagentconfig.json") + programFiles = filepath.Join(os.Getenv("CONTAINER_SANDBOX_MOUNT_POINT"), "Program Files") + programData = filepath.Join(os.Getenv("CONTAINER_SANDBOX_MOUNT_POINT"), "ProgramData") + } + AgentRootDir := filepath.Join(programFiles, AgentDir) AgentConfigDir := filepath.Join(programData, AgentDir) JsonConfigPath = filepath.Join(AgentConfigDir, JSON) diff --git a/translator/cmdutil/translatorutil.go b/translator/cmdutil/translatorutil.go index b9d52d77fc..aead607eff 100644 --- a/translator/cmdutil/translatorutil.go +++ b/translator/cmdutil/translatorutil.go @@ -11,8 +11,9 @@ import ( "strings" "github.com/xeipuuv/gojsonschema" - "go.opentelemetry.io/collector/confmap" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/internal/mapstructure" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -128,21 +129,26 @@ func 
GenerateMergedJsonConfigMap(ctx *context.Context) (map[string]interface{}, fmt.Printf("Cannot access %v: %v \n", path, err) return err } - if info.Mode()&os.ModeSymlink != 0 { - log.Printf("Find symbolic link %s \n", path) - path, err := filepath.EvalSymlinks(path) - if err != nil { - log.Printf("Symbolic link %v will be ignored due to err: %v. \n", path, err) - return nil + if envconfig.IsWindowsHostProcessContainer() { + log.Printf("Skipping checking symbolic link for Windows host process containers %s. \n"+ + "These symbolic links are skipped as valuating symlinks is common failures for Windows containers", path) + } else { + if info.Mode()&os.ModeSymlink != 0 { + log.Printf("Find symbolic link %s \n", path) + path, err := filepath.EvalSymlinks(path) + if err != nil { + log.Printf("Symbolic link %v will be ignored due to err: %v. \n", path, err) + return nil + } + info, err = os.Stat(path) + if err != nil { + log.Printf("Path %v will be ignored due to err: %v. \n", path, err) + } } - info, err = os.Stat(path) - if err != nil { - log.Printf("Path %v will be ignored due to err: %v. 
\n", path, err) + if info.IsDir() { + return nil } } - if info.IsDir() { - return nil - } if filepath.Ext(path) == context.TmpFileSuffix { // .tmp files @@ -220,11 +226,11 @@ func TranslateJsonMapToYamlConfig(jsonConfigValue interface{}) (interface{}, err if err != nil { return nil, err } - conf := confmap.New() - if err = conf.Marshal(cfg); err != nil { + var result map[string]any + if result, err = mapstructure.Marshal(cfg); err != nil { return nil, err } - return conf.ToStringMap(), nil + return result, nil } func ConfigToTomlFile(config interface{}, tomlConfigFilePath string) error { diff --git a/translator/config/mode.go b/translator/config/mode.go index 95638ba471..5f8c8f93ed 100644 --- a/translator/config/mode.go +++ b/translator/config/mode.go @@ -11,7 +11,20 @@ const ( ) const ( - ShortModeEC2 = "EC2" - ShortModeOnPrem = "OP" - ShortModeWithIRSA = "WI" + ModeECS = "ECS" +) + +const ( + ModeEKS = "EKS" + ModeK8sEC2 = "K8sEC2" + ModeK8sOnPrem = "K8sOnPrem" +) + +const ( + ShortModeEC2 = "EC2" + ShortModeOnPrem = "OP" + ShortModeWithIRSA = "WI" + ShortModeEKS = "EKS" + ShortModeK8sEC2 = "K8E" + ShortModeK8sOnPrem = "K8OP" ) diff --git a/translator/config/schema.json b/translator/config/schema.json index 0ab4e1d17d..b442482cfb 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -323,6 +323,29 @@ }, "additionalProperties": false }, + "tlsDefinitions": { + "type": "object", + "properties": { + "ca_file": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "cert_file": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "key_file": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "insecure": { + "type": "boolean" + } + } + }, "swapDefinitions": { "$ref": "#/definitions/metricsDefinition/definitions/basicMetricDefinition" }, @@ -590,6 +613,93 @@ } } }, + "tls": { + "$ref": "#/definitions/metricsDefinition/definitions/tlsDefinitions" + }, + "additionalProperties": true + }, + 
"application_signals": { + "type": "object", + "properties": { + "hosted_in": { + "type": "string", + "minLength": 1, + "maxLength": 1024 + }, + "rules": { + "description": "Custom rules defined by customer", + "type": "array", + "items": { + "type": "object", + "properties": { + "selectors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "dimension": { + "description": "dimension used for matching", + "type": "string", + "minLength": 1 + }, + "match": { + "description": "regex used for match", + "type": "string", + "minLength": 1 + } + }, + "required": [ + "dimension", + "match" + ] + } + }, + "replacements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "target_dimension": { + "description": "dimension to be replaced", + "type": "string", + "minLength": 1 + }, + "value": { + "description": "replacement value", + "type": "string" + } + }, + "required": [ + "target_dimension", + "value" + ] + } + }, + "action": { + "description": "action to be done, either keep, drop or replace", + "type": "string", + "enum": [ + "drop", + "keep", + "replace" + ] + }, + "rule_name": { + "description": "name of rule", + "type": "string", + "minLength": 1 + } + }, + "required": [ + "selectors", + "action" + ] + } + } + }, + "tls": { + "$ref": "#/definitions/metricsDefinition/definitions/tlsDefinitions" + }, "additionalProperties": true }, "ecs": { @@ -909,11 +1019,19 @@ "properties": {}, "additionalProperties": true }, + "application_signals": { + "type": "object", + "properties": {}, + "additionalProperties": true + }, "xray": { "$ref": "#/definitions/tracesDefinition/definitions/xrayDefinition" }, "otlp": { - "$ref": "#/definitions/tracesDefinition/definitions/otlpDefinition" + "tls": { + "$ref": "#/definitions/metricsDefinition/definitions/tlsDefinitions" + }, + "$ref": "#/definitions/tracesDefinition/definitions/otlpDefinitions" } }, "minProperties": 1, @@ -977,19 +1095,20 @@ }, "additionalProperties": false }, - 
"otlpDefinition": { - "type": "object", - "properties": { - "grpc_endpoint": { - "description": "gRPC endpoint to use to listen for OTLP protobuf traces", - "$ref": "#/definitions/endpointOverrideDefinition" + "otlpDefinitions": { + "oneOf": [ + { + "type": "array", + "minItems": 1, + "maxItems": 255, + "items": { + "$ref": "#/definitions/otlpObjectDefinition" + } }, - "http_endpoint": { - "description": "HTTP endpoint to use to listen for OTLP JSON traces", - "$ref": "#/definitions/endpointOverrideDefinition" + { + "$ref": "#/definitions/otlpObjectDefinition" } - }, - "additionalProperties": false + ] } } }, @@ -1045,6 +1164,20 @@ }, "additionalProperties": false }, + "otlpObjectDefinition": { + "type": "object", + "properties": { + "grpc_endpoint": { + "description": "gRPC endpoint to use to listen for OTLP protobuf information", + "$ref": "#/definitions/endpointOverrideDefinition" + }, + "http_endpoint": { + "description": "HTTP endpoint to use to listen for OTLP JSON information", + "$ref": "#/definitions/endpointOverrideDefinition" + } + }, + "additionalProperties": false + }, "ecsServiceDiscoveryDefinition": { "type": "object", "descriptions": "Define ECS service discovery for Prometheus", diff --git a/translator/context/context.go b/translator/context/context.go index 9dcc7ae7a1..30091ce399 100644 --- a/translator/context/context.go +++ b/translator/context/context.go @@ -41,6 +41,7 @@ type Context struct { multiConfig string outputTomlFilePath string mode string + kubernetesMode string shortMode string credentials map[string]string proxy map[string]string @@ -97,6 +98,10 @@ func (ctx *Context) Mode() string { return ctx.mode } +func (ctx *Context) KubernetesMode() string { + return ctx.kubernetesMode +} + func (ctx *Context) ShortMode() string { return ctx.shortMode } @@ -128,7 +133,23 @@ func (ctx *Context) SetMode(mode string) { ctx.mode = config.ModeWithIRSA ctx.shortMode = config.ShortModeWithIRSA default: - log.Panicf("Invalid mode %s. 
Valid mode values are %s, %s, %s and %s.", mode, config.ModeEC2, config.ModeOnPrem, config.ModeOnPremise, config.ModeWithIRSA) + log.Panicf("Invalid mode %s. Valid mode values are %s, %s, %s, and %s.", mode, config.ModeEC2, config.ModeOnPrem, config.ModeOnPremise, config.ModeWithIRSA) + } +} + +func (ctx *Context) SetKubernetesMode(mode string) { + switch mode { + case config.ModeEKS: + ctx.kubernetesMode = config.ModeEKS + ctx.shortMode = config.ShortModeEKS + case config.ModeK8sEC2: + ctx.kubernetesMode = config.ModeK8sEC2 + ctx.shortMode = config.ShortModeK8sEC2 + case config.ModeK8sOnPrem: + ctx.kubernetesMode = config.ModeK8sOnPrem + ctx.shortMode = config.ShortModeK8sOnPrem + default: + ctx.kubernetesMode = "" } } diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index e8a7e707f2..3e4faa03ef 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -24,11 +24,8 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: @@ -75,11 +72,11 @@ service: processors: - ec2tagger receivers: + - telegraf_mem - telegraf_netstat - telegraf_swap - telegraf_cpu - telegraf_disk - - telegraf_mem metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -94,8 +91,6 @@ service: disable_caller: false disable_stacktrace: 
false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -107,8 +102,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index aea99aa4d3..80e9460692 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -24,11 +24,8 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: @@ -83,13 +80,13 @@ service: processors: - ec2tagger receivers: + - telegraf_disk - telegraf_mem - telegraf_netstat - telegraf_swap - telegraf_ethtool - telegraf_nvidia_smi - telegraf_cpu - - telegraf_disk metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -104,8 +101,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -117,8 +112,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git 
a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index c09b16648b..7222672004 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: @@ -73,21 +73,19 @@ service: processors: - ec2tagger receivers: + - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/2073218482 - telegraf_win_perf_counters/2039663244 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 - telegraf_win_perf_counters/3446270237 - - telegraf_win_perf_counters/3762679655 telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -99,8 +97,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.json b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.json index b2b98310c7..0c44e570b4 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.json +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.json @@ -4,14 +4,23 @@ }, "logs": { "metrics_collected": { - "app_signals": { - "hosted_in": "TestCluster" + 
"application_signals": { + "tls": { + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key" + }, + "hosted_in": "TestCluster", + "limiter": { + "log_dropped_metrics": true, + "rotation_interval": "10m" + } }, "kubernetes": { "cluster_name": "TestCluster", "metrics_collection_interval": 30, "disable_metric_extraction": true, - "enhanced_container_insights": false + "enhanced_container_insights": false, + "accelerated_compute_metrics": false } }, "force_flush_interval": 5, @@ -19,7 +28,7 @@ }, "traces": { "traces_collected": { - "app_signals": {} + "application_signals": {} } } } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index c780070374..fb91c0a6cf 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -1,133 +1,122 @@ -connectors: {} exporters: - awsemf/app_signals: + awsemf/application_signals: certificate_file_path: "" detailed_metrics: false dimension_rollup_option: NoDimensionRollup disable_metric_extraction: false eks_fargate_container_insights_enabled: false - endpoint: "" + endpoint: https://fake_endpoint enhanced_container_insights: false - imds_retries: 0 + imds_retries: 1 local_mode: false - log_group_name: /aws/appsignals/eks + log_group_name: /aws/application-signals/data log_retention: 0 log_stream_name: "" max_retries: 2 metric_declarations: - dimensions: - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - Service label_matchers: - label_names: - - aws.span.kind - regex: ^(SERVER|LOCAL_ROOT)$ + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - dimensions: - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - 
K8s.RemoteNamespace + - - Environment - Operation + - RemoteEnvironment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment - Operation + - RemoteEnvironment - RemoteOperation - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - RemoteOperation - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteOperation - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteOperation - RemoteService - Service - - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - RemoteService - - RemoteTarget + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService - - RemoteService label_matchers: - label_names: - - aws.span.kind - regex: 
^(CLIENT|PRODUCER|CONSUMER)$ + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - metric_descriptors: [] middleware: agenthealth/logs - namespace: AppSignals + namespace: ApplicationSignals no_verify_ssl: false num_workers: 8 output_destination: cloudwatch - parse_json_encoded_attr_values: [] profile: "" proxy_address: "" - region: "" + region: us-east-1 request_timeout_seconds: 30 resource_arn: "" resource_to_telemetry_conversion: enabled: false retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "1" awsemf/containerinsights: certificate_file_path: "" @@ -154,7 +143,6 @@ exporters: - Service - - ClusterName - Namespace - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -166,7 +154,6 @@ exporters: - - ClusterName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_number_of_container_restarts - dimensions: @@ -174,7 +161,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -183,7 +169,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -194,7 +179,6 @@ exporters: - node_number_of_running_containers - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_usage_total - node_cpu_limit @@ -205,7 +189,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_filesystem_utilization - dimensions: @@ -213,23 +196,19 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - 
label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count - metric_descriptors: [] middleware: agenthealth/logs namespace: ContainerInsights no_verify_ssl: false @@ -247,10 +226,8 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" - awsxray/app_signals: - aws_log_groups: [] + awsxray/application_signals: certificate_file_path: "" endpoint: "" imds_retries: 1 @@ -258,13 +235,12 @@ exporters: indexed_attributes: - aws.local.service - aws.local.operation + - aws.local.environment - aws.remote.service - aws.remote.operation - - HostedIn.EKS.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace - - aws.remote.target - - HostedIn.Environment + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type local_mode: false max_retries: 2 middleware: agenthealth/traces @@ -276,7 +252,6 @@ exporters: request_timeout_seconds: 30 resource_arn: "" role_arn: "" - shared_credentials_file: [] telemetry: enabled: true include_metadata: true @@ -286,27 +261,43 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EKS + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments - awsproxy/app_signals: + usage_flags: + mode: EKS + region_type: ACJ + awsproxy/application_signals: aws_endpoint: "" + certificate_file_path: "" + dialer: + timeout: "0s" endpoint: 0.0.0.0:2000 + imds_retries: 1 local_mode: false + profile: "" proxy_address: "" - region: "" + region: us-east-1 + service_name: "" role_arn: "" processors: - awsappsignals: + awsapplicationsignals: + limiter: + disabled: false + drop_threshold: 500 + garbage_collection_interval: 10m0s + log_dropped_metrics: true + rotation_interval: 10m0s resolvers: - - platform: eks - name: TestCluster - rules: [] + - name: TestCluster + platform: eks batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] 
send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s @@ -317,8 +308,8 @@ processors: enabled: true cloud.provider: enabled: true - attributes: [] - auth: null + k8s.cluster.name: + enabled: false azure: resource_attributes: azure.resourcegroup.name: @@ -345,30 +336,14 @@ processors: consul: address: "" datacenter: "" - meta: {} namespace: "" resource_attributes: - azure.resourcegroup.name: - enabled: true - azure.vm.name: - enabled: true - azure.vm.scaleset.name: - enabled: true - azure.vm.size: - enabled: true - cloud.account.id: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true cloud.region: enabled: true host.id: enabled: true host.name: enabled: true - token: '[REDACTED]' token_file: "" detectors: - eks @@ -403,6 +378,7 @@ processors: enabled: true tags: - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName ecs: resource_attributes: aws.ecs.cluster.arn: @@ -413,6 +389,8 @@ processors: enabled: true aws.ecs.task.family: enabled: true + aws.ecs.task.id: + enabled: true aws.ecs.task.revision: enabled: true aws.log.group.arns: @@ -439,6 +417,8 @@ processors: enabled: true cloud.provider: enabled: true + k8s.cluster.name: + enabled: false elasticbeanstalk: resource_attributes: cloud.platform: @@ -488,7 +468,6 @@ processors: enabled: true k8s.cluster.name: enabled: true - headers: {} heroku: resource_attributes: cloud.provider: @@ -507,6 +486,8 @@ processors: enabled: true service.version: enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" idle_conn_timeout: 1m30s k8snode: auth_type: serviceAccount @@ -537,9 +518,7 @@ processors: enabled: true faas.version: enabled: true - max_conns_per_host: null max_idle_conns: 100 - max_idle_conns_per_host: null openshift: address: "" resource_attributes: @@ -552,23 +531,21 @@ processors: k8s.cluster.name: enabled: true tls: - ca_file: "" - ca_pem: '[REDACTED]' - cert_file: "" - cert_pem: '[REDACTED]' - insecure: false - insecure_skip_verify: false - key_file: "" - 
key_pem: '[REDACTED]' - max_version: "" - min_version: "" - reload_interval: 0s - server_name_override: "" + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" token: "" override: true + proxy_url: "" read_buffer_size: 0 system: - hostname_sources: [] resource_attributes: host.arch: enabled: false @@ -586,6 +563,10 @@ processors: enabled: false host.id: enabled: false + host.ip: + enabled: false + host.mac: + enabled: false host.name: enabled: true os.description: @@ -593,9 +574,21 @@ processors: os.type: enabled: true timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" write_buffer_size: 0 receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: false add_full_pod_name_metric_label: false add_service_as_attribute: true @@ -619,45 +612,59 @@ receivers: request_timeout_seconds: 0 resource_arn: "" role_arn: "" - shared_credentials_file: [] - otlp/app_signals: + otlp/application_signals: protocols: grpc: - auth: null endpoint: 0.0.0.0:4315 + dialer: + timeout: "0s" include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null + tls: + ca_file: "" + cert_file: path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + include_system_ca_certs_pool: false transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:4316 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null + tls: + ca_file: "" + cert_file: 
path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + include_system_ca_certs_pool: false traces_url_path: /v1/traces service: extensions: - - awsproxy/app_signals + - awsproxy/application_signals - agenthealth/traces - agenthealth/logs pipelines: - metrics/app_signals: + metrics/application_signals: exporters: - - awsemf/app_signals + - awsemf/application_signals processors: - resourcedetection - - awsappsignals + - awsapplicationsignals receivers: - - otlp/app_signals + - otlp/application_signals metrics/containerinsights: exporters: - awsemf/containerinsights @@ -665,24 +672,21 @@ service: - batch/containerinsights receivers: - awscontainerinsightreceiver - traces/app_signals: + traces/application_signals: exporters: - - awsxray/app_signals + - awsxray/application_signals processors: - resourcedetection - - awsappsignals + - awsapplicationsignals receivers: - - otlp/app_signals + - otlp/application_signals telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -691,8 +695,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.json b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.json index b2b98310c7..dcb42fcd2e 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.json +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.json @@ -4,14 +4,19 @@ }, "logs": { "metrics_collected": { - "app_signals": { - "hosted_in": "TestCluster" + "application_signals": { + "hosted_in": "TestCluster", + "limiter": { + "log_dropped_metrics": true, + "rotation_interval": "10m" + } }, "kubernetes": { "cluster_name": 
"TestCluster", "metrics_collection_interval": 30, "disable_metric_extraction": true, - "enhanced_container_insights": false + "enhanced_container_insights": false, + "accelerated_compute_metrics": false } }, "force_flush_interval": 5, @@ -19,7 +24,7 @@ }, "traces": { "traces_collected": { - "app_signals": {} + "application_signals": {} } } } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index eeba0f8f00..a49ebeeb37 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -1,133 +1,122 @@ -connectors: {} exporters: - awsemf/app_signals: + awsemf/application_signals: certificate_file_path: "" detailed_metrics: false dimension_rollup_option: NoDimensionRollup disable_metric_extraction: false eks_fargate_container_insights_enabled: false - endpoint: "" + endpoint: https://fake_endpoint enhanced_container_insights: false - imds_retries: 0 + imds_retries: 1 local_mode: false - log_group_name: /aws/appsignals/k8s + log_group_name: /aws/application-signals/data log_retention: 0 log_stream_name: "" max_retries: 2 metric_declarations: - dimensions: - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - Service label_matchers: - label_names: - - aws.span.kind - regex: ^(SERVER|LOCAL_ROOT)$ + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - dimensions: - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment - Operation + - RemoteEnvironment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - 
Environment - Operation + - RemoteEnvironment - RemoteOperation - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - Operation - RemoteOperation - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace + - - Environment + - RemoteEnvironment - RemoteOperation - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment - RemoteOperation - RemoteService - Service - - - HostedIn.K8s.Cluster - - HostedIn.K8s.Namespace + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - RemoteService - - RemoteTarget + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService - - RemoteService label_matchers: - label_names: - - aws.span.kind - regex: ^(CLIENT|PRODUCER|CONSUMER)$ + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - metric_descriptors: [] middleware: agenthealth/logs - namespace: AppSignals + namespace: ApplicationSignals no_verify_ssl: false 
num_workers: 8 output_destination: cloudwatch - parse_json_encoded_attr_values: [] profile: "" proxy_address: "" - region: "" + region: us-east-1 request_timeout_seconds: 30 resource_arn: "" resource_to_telemetry_conversion: enabled: false retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "1" awsemf/containerinsights: certificate_file_path: "" @@ -154,7 +143,6 @@ exporters: - Service - - ClusterName - Namespace - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -166,7 +154,6 @@ exporters: - - ClusterName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_number_of_container_restarts - dimensions: @@ -174,7 +161,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -183,7 +169,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -194,7 +179,6 @@ exporters: - node_number_of_running_containers - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_usage_total - node_cpu_limit @@ -205,7 +189,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_filesystem_utilization - dimensions: @@ -213,23 +196,19 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count - metric_descriptors: [] middleware: agenthealth/logs namespace: ContainerInsights no_verify_ssl: false @@ -247,10 +226,8 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false 
role_arn: "" - shared_credentials_file: [] version: "0" - awsxray/app_signals: - aws_log_groups: [] + awsxray/application_signals: certificate_file_path: "" endpoint: "" imds_retries: 1 @@ -258,13 +235,12 @@ exporters: indexed_attributes: - aws.local.service - aws.local.operation + - aws.local.environment - aws.remote.service - aws.remote.operation - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace - - aws.remote.target - - HostedIn.Environment - - HostedIn.K8s.Cluster + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type local_mode: false max_retries: 2 middleware: agenthealth/traces @@ -276,7 +252,6 @@ exporters: request_timeout_seconds: 30 resource_arn: "" role_arn: "" - shared_credentials_file: [] telemetry: enabled: true include_metadata: true @@ -286,32 +261,334 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: K8E + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments - awsproxy/app_signals: + usage_flags: + mode: K8E + region_type: ACJ + awsproxy/application_signals: aws_endpoint: "" + certificate_file_path: "" + dialer: + timeout: "0s" endpoint: 0.0.0.0:2000 + imds_retries: 1 local_mode: false + profile: "" proxy_address: "" - region: "" + region: us-east-1 + service_name: "" role_arn: "" processors: - awsappsignals: + awsapplicationsignals: + limiter: + disabled: false + drop_threshold: 500 + garbage_collection_interval: 10m0s + log_dropped_metrics: true + rotation_interval: 10m0s resolvers: - - platform: k8s - name: TestCluster - rules: [] + - name: TestCluster + platform: k8s batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + 
azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.ecs.task.id: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + 
enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" + idle_conn_timeout: 1m30s + k8snode: + auth_type: serviceAccount + context: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + 
server_name_override: "" + token: "" + override: true + proxy_url: "" + read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + write_buffer_size: 0 receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: false add_full_pod_name_metric_label: false add_service_as_attribute: true @@ -335,44 +612,39 @@ receivers: request_timeout_seconds: 0 resource_arn: "" role_arn: "" - shared_credentials_file: [] - otlp/app_signals: + otlp/application_signals: protocols: grpc: - auth: null endpoint: 0.0.0.0:4315 + dialer: + timeout: "0s" include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:4316 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces service: extensions: - - awsproxy/app_signals + - awsproxy/application_signals - agenthealth/traces - agenthealth/logs pipelines: - metrics/app_signals: + metrics/application_signals: exporters: - - awsemf/app_signals + - awsemf/application_signals processors: - - awsappsignals + - resourcedetection + - awsapplicationsignals receivers: - - 
otlp/app_signals + - otlp/application_signals metrics/containerinsights: exporters: - awsemf/containerinsights @@ -380,23 +652,21 @@ service: - batch/containerinsights receivers: - awscontainerinsightreceiver - traces/app_signals: + traces/application_signals: exporters: - - awsxray/app_signals + - awsxray/application_signals processors: - - awsappsignals + - resourcedetection + - awsapplicationsignals receivers: - - otlp/app_signals + - otlp/application_signals telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -405,8 +675,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.conf b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.conf new file mode 100644 index 0000000000..007bb60efb --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "60s" + logfile = "" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.json b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.json new file mode 100644 index 0000000000..e289b8adae --- /dev/null +++ 
b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.json @@ -0,0 +1,34 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "metrics_collected": { + "app_signals": { + "tls": { + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key" + }, + "hosted_in": "TestCluster", + "limiter": { + "log_dropped_metrics": true, + "rotation_interval": "10m" + } + }, + "kubernetes": { + "cluster_name": "TestCluster", + "metrics_collection_interval": 30, + "disable_metric_extraction": true, + "enhanced_container_insights": false, + "accelerated_compute_metrics": false + } + }, + "force_flush_interval": 5, + "endpoint_override":"https://fake_endpoint" + }, + "traces": { + "traces_collected": { + "app_signals": {} + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml new file mode 100644 index 0000000000..6a4d070a80 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -0,0 +1,698 @@ +exporters: + awsemf/application_signals: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/application-signals/data + log_retention: 0 + log_stream_name: "" + max_retries: 2 + metric_declarations: + - dimensions: + - - Environment + - Operation + - Service + - - Environment + - Service + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - - Environment + - Operation + - RemoteEnvironment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service 
+ - - Environment + - Operation + - RemoteEnvironment + - RemoteOperation + - RemoteService + - Service + - - Environment + - Operation + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - Operation + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteService + - Service + - - Environment + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - - RemoteService + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + middleware: agenthealth/logs + namespace: ApplicationSignals + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: false + retain_initial_value_of_delta_metric: false + role_arn: "" + version: "1" + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 
+ log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - Namespace + - PodName + metric_name_selectors: + - pod_number_of_container_restarts + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - dimensions: + - - ClusterName + metric_name_selectors: + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + 
resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + version: "0" + awsxray/application_signals: + certificate_file_path: "" + endpoint: "" + imds_retries: 1 + index_all_attributes: false + indexed_attributes: + - aws.local.service + - aws.local.operation + - aws.local.environment + - aws.remote.service + - aws.remote.operation + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + telemetry: + enabled: true + include_metadata: true +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + usage_flags: + mode: EKS + region_type: ACJ + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments + usage_flags: + mode: EKS + region_type: ACJ + awsproxy/application_signals: + aws_endpoint: "" + certificate_file_path: "" + dialer: + timeout: "0s" + endpoint: 0.0.0.0:2000 + imds_retries: 1 + local_mode: false + profile: "" + proxy_address: "" + region: us-east-1 + service_name: "" + role_arn: "" +processors: + awsapplicationsignals: + limiter: + disabled: false + drop_threshold: 500 + garbage_collection_interval: 10m0s + log_dropped_metrics: true + rotation_interval: 10m0s + resolvers: + - name: TestCluster + platform: eks + batch/containerinsights: + metadata_cardinality_limit: 1000 + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + 
azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.id: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + 
enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" + idle_conn_timeout: 1m30s + k8snode: + auth_type: serviceAccount + context: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + token: "" + override: true + proxy_url: "" + 
read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + write_buffer_size: 0 +receivers: + awscontainerinsightreceiver: + accelerated_compute_metrics: false + add_container_name_metric_label: false + add_full_pod_name_metric_label: false + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: false + endpoint: "" + imds_retries: 1 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: false + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + otlp/application_signals: + protocols: + grpc: + endpoint: 0.0.0.0:4315 + dialer: + timeout: "0s" + include_metadata: false + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: + ca_file: "" + cert_file: path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + include_system_ca_certs_pool: false + transport: tcp + write_buffer_size: 0 + http: + endpoint: 0.0.0.0:4316 + include_metadata: false + logs_url_path: 
/v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + tls: + ca_file: "" + cert_file: path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + include_system_ca_certs_pool: false + traces_url_path: /v1/traces +service: + extensions: + - awsproxy/application_signals + - agenthealth/traces + - agenthealth/logs + pipelines: + metrics/application_signals: + exporters: + - awsemf/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + traces/application_signals: + exporters: + - awsxray/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: {} diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.conf b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.conf new file mode 100644 index 0000000000..007bb60efb --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "60s" + logfile = "" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + 
log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.json b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.json new file mode 100644 index 0000000000..371a6c9957 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.json @@ -0,0 +1,46 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "metrics_collected": { + "application_signals": { + "tls": { + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key" + }, + "hosted_in": "TestCluster", + "limiter": { + "log_dropped_metrics": true, + "rotation_interval": "10m" + } + }, + "app_signals": { + "tls": { + "cert_file": "/other/path/to/cert.crt", + "key_file": "/other/path/to/key.key" + }, + "hosted_in": "OtherTestCluster", + "limiter": { + "log_dropped_metrics": false, + "rotation_interval": "20m" + } + }, + "kubernetes": { + "cluster_name": "TestCluster", + "metrics_collection_interval": 30, + "disable_metric_extraction": true, + "enhanced_container_insights": false, + "accelerated_compute_metrics": false + } + }, + "force_flush_interval": 5, + "endpoint_override":"https://fake_endpoint" + }, + "traces": { + "traces_collected": { + "application_signals": {}, + "app_signals": {} + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml new file mode 100644 index 0000000000..a8c3e656d9 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -0,0 +1,698 @@ +exporters: + awsemf/application_signals: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + 
local_mode: false + log_group_name: /aws/application-signals/data + log_retention: 0 + log_stream_name: "" + max_retries: 2 + metric_declarations: + - dimensions: + - - Environment + - Operation + - Service + - - Environment + - Service + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - - Environment + - Operation + - RemoteEnvironment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - Operation + - RemoteEnvironment + - RemoteOperation + - RemoteService + - Service + - - Environment + - Operation + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - Operation + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteService + - Service + - - Environment + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - RemoteEnvironment + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - - RemoteService + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + middleware: agenthealth/logs + namespace: ApplicationSignals + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + profile: "" + proxy_address: "" + region: us-east-1 + 
request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: false + retain_initial_value_of_delta_metric: false + role_arn: "" + version: "1" + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - Namespace + - PodName + metric_name_selectors: + - pod_number_of_container_restarts + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - dimensions: + - - ClusterName + metric_name_selectors: + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + 
metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + version: "0" + awsxray/application_signals: + certificate_file_path: "" + endpoint: "" + imds_retries: 1 + index_all_attributes: false + indexed_attributes: + - aws.local.service + - aws.local.operation + - aws.local.environment + - aws.remote.service + - aws.remote.operation + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + telemetry: + enabled: true + include_metadata: true +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + usage_flags: + mode: EKS + region_type: ACJ + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments + usage_flags: + mode: EKS + region_type: ACJ + awsproxy/application_signals: + aws_endpoint: "" + dialer: + timeout: "0s" + certificate_file_path: "" + endpoint: 0.0.0.0:2000 + imds_retries: 1 + local_mode: false + profile: "" + proxy_address: "" + region: us-east-1 + service_name: "" + role_arn: "" +processors: + awsapplicationsignals: + limiter: + disabled: false + drop_threshold: 500 + 
garbage_collection_interval: 10m0s + log_dropped_metrics: true + rotation_interval: 10m0s + resolvers: + - name: TestCluster + platform: eks + batch/containerinsights: + metadata_cardinality_limit: 1000 + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.ecs.task.id: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: 
true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" + idle_conn_timeout: 1m30s + k8snode: + auth_type: serviceAccount + context: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + 
faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + token: "" + override: true + proxy_url: "" + read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + write_buffer_size: 0 +receivers: + awscontainerinsightreceiver: + accelerated_compute_metrics: false + add_container_name_metric_label: false + add_full_pod_name_metric_label: false + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: false + endpoint: "" + imds_retries: 1 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: false + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" 
+ role_arn: "" + otlp/application_signals: + protocols: + grpc: + endpoint: 0.0.0.0:4315 + dialer: + timeout: "0s" + include_metadata: false + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: + ca_file: "" + cert_file: path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + include_system_ca_certs_pool: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + transport: tcp + write_buffer_size: 0 + http: + endpoint: 0.0.0.0:4316 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + tls: + ca_file: "" + cert_file: path/to/cert.crt + client_ca_file: "" + client_ca_file_reload: false + include_system_ca_certs_pool: false + key_file: path/to/key.key + max_version: "" + min_version: "" + reload_interval: 0s + traces_url_path: /v1/traces +service: + extensions: + - awsproxy/application_signals + - agenthealth/traces + - agenthealth/logs + pipelines: + metrics/application_signals: + exporters: + - awsemf/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + traces/application_signals: + exporters: + - awsxray/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: {} diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.conf b/translator/tocwconfig/sampleConfig/base_appsignals_config.conf index 007bb60efb..850afc3baf 100644 --- 
a/translator/tocwconfig/sampleConfig/base_appsignals_config.conf +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.conf @@ -3,9 +3,9 @@ debug = false flush_interval = "1s" flush_jitter = "0s" - hostname = "host_name_from_env" + hostname = "" interval = "60s" - logfile = "" + logfile = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" logtarget = "lumberjack" metric_batch_size = 1000 metric_buffer_limit = 10000 diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.json b/translator/tocwconfig/sampleConfig/base_appsignals_config.json index 255feae87a..095180d2ec 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.json +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.json @@ -3,14 +3,15 @@ "region": "us-east-1" }, "logs": { + "log_stream_name": "host_name_from_env", "metrics_collected": { - "app_signals": {} + "application_signals": {} }, "endpoint_override":"https://fake_endpoint" }, "traces": { "traces_collected": { - "app_signals": {} + "application_signals": {} }, "endpoint_override":"https://fake_endpoint" } diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index f4e3e759b9..dd5c0e8d4d 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -1,95 +1,96 @@ -connectors: {} exporters: - awsemf/app_signals: + awsemf/application_signals: certificate_file_path: "" detailed_metrics: false dimension_rollup_option: NoDimensionRollup disable_metric_extraction: false eks_fargate_container_insights_enabled: false - endpoint: "" + endpoint: https://fake_endpoint enhanced_container_insights: false - imds_retries: 0 - local_mode: false - log_group_name: /aws/appsignals/generic + imds_retries: 1 + local_mode: true + log_group_name: /aws/application-signals/data log_retention: 0 log_stream_name: "" max_retries: 2 
metric_declarations: - dimensions: - - - HostedIn.Environment + - - Environment - Operation - Service - - - HostedIn.Environment + - - Environment - Service label_matchers: - label_names: - - aws.span.kind - regex: ^(SERVER|LOCAL_ROOT)$ + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - dimensions: - - - HostedIn.Environment + - - Environment - Operation - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.Environment + - - Environment - Operation - RemoteOperation - RemoteService - Service - - - HostedIn.Environment + - - Environment - RemoteService - Service - - - HostedIn.Environment + - - Environment - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - HostedIn.Environment + - - Environment - RemoteOperation - RemoteService - Service - - - HostedIn.Environment + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType - RemoteService - - RemoteTarget - Service - - - RemoteService - - RemoteTarget + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService - - RemoteService label_matchers: - label_names: - - aws.span.kind - regex: ^(CLIENT|PRODUCER|CONSUMER)$ + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ separator: ; metric_name_selectors: - Latency - Fault - Error - metric_descriptors: [] middleware: agenthealth/logs - namespace: AppSignals + namespace: ApplicationSignals no_verify_ssl: false num_workers: 8 output_destination: cloudwatch - parse_json_encoded_attr_values: [] - profile: "" + profile: AmazonCloudWatchAgent proxy_address: "" - region: "" + region: us-east-1 request_timeout_seconds: 30 resource_arn: "" resource_to_telemetry_conversion: enabled: false retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] + shared_credentials_file: + - fake-path version: "1" - 
awsxray/app_signals: - aws_log_groups: [] + awsxray/application_signals: certificate_file_path: "" endpoint: https://fake_endpoint imds_retries: 1 @@ -97,25 +98,25 @@ exporters: indexed_attributes: - aws.local.service - aws.local.operation + - aws.local.environment - aws.remote.service - aws.remote.operation - - HostedIn.K8s.Namespace - - K8s.RemoteNamespace - - aws.remote.target - - HostedIn.Environment - - HostedIn.EKS.Cluster - local_mode: false + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type + local_mode: true max_retries: 2 middleware: agenthealth/traces no_verify_ssl: false num_workers: 8 - profile: "" + profile: AmazonCloudWatchAgent proxy_address: "" region: us-east-1 request_timeout_seconds: 30 resource_arn: "" role_arn: "" - shared_credentials_file: [] + shared_credentials_file: + - fake-path telemetry: enabled: true include_metadata: true @@ -125,24 +126,37 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: OP + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments - awsproxy/app_signals: - aws_endpoint: "" + usage_flags: + mode: OP + region_type: ACJ + awsproxy/application_signals: + aws_endpoint: https://fake_endpoint + dialer: + timeout: "0s" + certificate_file_path: "" endpoint: 0.0.0.0:2000 - local_mode: false + imds_retries: 1 + local_mode: true + profile: AmazonCloudWatchAgent proxy_address: "" - region: "" + region: us-east-1 role_arn: "" + service_name: "" + shared_credentials_file: + - fake-path processors: - awsappsignals: + awsapplicationsignals: resolvers: - name: "" platform: generic - rules: [] resourcedetection: aks: resource_attributes: @@ -150,8 +164,8 @@ processors: enabled: true cloud.provider: enabled: true - attributes: [] - auth: null + k8s.cluster.name: + enabled: false azure: resource_attributes: azure.resourcegroup.name: @@ -178,30 +192,14 @@ processors: consul: address: "" datacenter: "" - meta: {} namespace: "" 
resource_attributes: - azure.resourcegroup.name: - enabled: true - azure.vm.name: - enabled: true - azure.vm.scaleset.name: - enabled: true - azure.vm.size: - enabled: true - cloud.account.id: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true cloud.region: enabled: true host.id: enabled: true host.name: enabled: true - token: '[REDACTED]' token_file: "" detectors: - eks @@ -236,6 +234,7 @@ processors: enabled: true tags: - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName ecs: resource_attributes: aws.ecs.cluster.arn: @@ -246,6 +245,8 @@ processors: enabled: true aws.ecs.task.family: enabled: true + aws.ecs.task.id: + enabled: true aws.ecs.task.revision: enabled: true aws.log.group.arns: @@ -272,6 +273,8 @@ processors: enabled: true cloud.provider: enabled: true + k8s.cluster.name: + enabled: false elasticbeanstalk: resource_attributes: cloud.platform: @@ -321,7 +324,6 @@ processors: enabled: true k8s.cluster.name: enabled: true - headers: {} heroku: resource_attributes: cloud.provider: @@ -340,6 +342,8 @@ processors: enabled: true service.version: enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" idle_conn_timeout: 1m30s k8snode: auth_type: serviceAccount @@ -370,9 +374,7 @@ processors: enabled: true faas.version: enabled: true - max_conns_per_host: null max_idle_conns: 100 - max_idle_conns_per_host: null openshift: address: "" resource_attributes: @@ -386,22 +388,20 @@ processors: enabled: true tls: ca_file: "" - ca_pem: '[REDACTED]' cert_file: "" - cert_pem: '[REDACTED]' + include_system_ca_certs_pool: false insecure: false insecure_skip_verify: false key_file: "" - key_pem: '[REDACTED]' max_version: "" min_version: "" - reload_interval: 0s + reload_interval: "0s" server_name_override: "" token: "" override: true + proxy_url: "" read_buffer_size: 0 system: - hostname_sources: [] resource_attributes: host.arch: enabled: false @@ -419,6 +419,10 @@ processors: enabled: false host.id: enabled: false + 
host.ip: + enabled: false + host.mac: + enabled: false host.name: enabled: true os.description: @@ -426,64 +430,69 @@ processors: os.type: enabled: true timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" write_buffer_size: 0 receivers: - otlp/app_signals: + otlp/application_signals: protocols: grpc: - auth: null + dialer: + timeout: "0s" endpoint: 0.0.0.0:4315 include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:4316 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces service: extensions: - - awsproxy/app_signals + - awsproxy/application_signals - agenthealth/traces - agenthealth/logs pipelines: - metrics/app_signals: + metrics/application_signals: exporters: - - awsemf/app_signals + - awsemf/application_signals processors: - resourcedetection - - awsappsignals + - awsapplicationsignals receivers: - - otlp/app_signals - traces/app_signals: + - otlp/application_signals + traces/application_signals: exporters: - - awsxray/app_signals + - awsxray/application_signals processors: - resourcedetection - - awsappsignals + - awsapplicationsignals receivers: - - otlp/app_signals + - otlp/application_signals telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log sampling: enabled: true initial: 2 @@ -492,8 +501,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - 
propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.conf b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.conf new file mode 100644 index 0000000000..850afc3baf --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "" + interval = "60s" + logfile = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.json b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.json new file mode 100644 index 0000000000..40a45266e0 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.json @@ -0,0 +1,18 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "log_stream_name": "host_name_from_env", + "metrics_collected": { + "app_signals": {} + }, + "endpoint_override":"https://fake_endpoint" + }, + "traces": { + "traces_collected": { + "app_signals": {} + }, + "endpoint_override":"https://fake_endpoint" + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml new file mode 100644 index 0000000000..11c0eefb2b --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -0,0 +1,504 @@ +exporters: + awsemf/application_signals: + certificate_file_path: "" + 
detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: false + imds_retries: 1 + local_mode: true + log_group_name: /aws/application-signals/data + log_retention: 0 + log_stream_name: "" + max_retries: 2 + metric_declarations: + - dimensions: + - - Environment + - Operation + - Service + - - Environment + - Service + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - - Environment + - Operation + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - Operation + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - Environment + - RemoteOperation + - RemoteService + - Service + - - Environment + - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - Service + - - RemoteResourceIdentifier + - RemoteResourceType + - RemoteService + - - RemoteService + label_matchers: + - label_names: + - Telemetry.Source + regex: ^(ClientSpan|ProducerSpan|ConsumerSpan)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + middleware: agenthealth/logs + namespace: ApplicationSignals + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + profile: AmazonCloudWatchAgent + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: false + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: + - fake-path + version: "1" + awsxray/application_signals: + certificate_file_path: "" + endpoint: 
https://fake_endpoint + imds_retries: 1 + index_all_attributes: false + indexed_attributes: + - aws.local.service + - aws.local.operation + - aws.local.environment + - aws.remote.service + - aws.remote.operation + - aws.remote.environment + - aws.remote.resource.identifier + - aws.remote.resource.type + local_mode: true + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: AmazonCloudWatchAgent + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - fake-path + telemetry: + enabled: true + include_metadata: true +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + usage_flags: + mode: OP + region_type: ACJ + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments + usage_flags: + mode: OP + region_type: ACJ + awsproxy/application_signals: + aws_endpoint: https://fake_endpoint + dialer: + timeout: "0s" + certificate_file_path: "" + endpoint: 0.0.0.0:2000 + imds_retries: 1 + local_mode: true + profile: AmazonCloudWatchAgent + proxy_address: "" + region: us-east-1 + role_arn: "" + service_name: "" + shared_credentials_file: + - fake-path +processors: + awsapplicationsignals: + resolvers: + - name: "" + platform: generic + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + 
enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.id: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + 
gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: "0s" + http2_read_idle_timeout: "0s" + idle_conn_timeout: 1m30s + k8snode: + auth_type: serviceAccount + context: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + token: "" + override: true + proxy_url: "" + read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: 
false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: "0s" + server_name_override: "" + write_buffer_size: 0 +receivers: + otlp/application_signals: + protocols: + grpc: + dialer: + timeout: "0s" + endpoint: 0.0.0.0:4315 + include_metadata: false + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + transport: tcp + write_buffer_size: 0 + http: + endpoint: 0.0.0.0:4316 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + traces_url_path: /v1/traces +service: + extensions: + - awsproxy/application_signals + - agenthealth/traces + - agenthealth/logs + pipelines: + metrics/application_signals: + exporters: + - awsemf/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + traces/application_signals: + exporters: + - awsxray/application_signals + processors: + - resourcedetection + - awsapplicationsignals + receivers: + - otlp/application_signals + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: {} diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.json b/translator/tocwconfig/sampleConfig/base_container_insights_config.json index 3c69c7cc65..510cb41463 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.json +++ 
b/translator/tocwconfig/sampleConfig/base_container_insights_config.json @@ -10,7 +10,8 @@ "cluster_name": "TestCluster", "metrics_collection_interval": 30, "disable_metric_extraction": true, - "prefer_full_pod_name": true + "prefer_full_pod_name": true, + "accelerated_compute_metrics": false } }, "force_flush_interval": 5, diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index 8f9fe87028..4a923363fe 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatchlogs/emf_logs: certificate_file_path: /etc/test/ca_bundle.pem @@ -31,8 +30,6 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsemf/containerinsights: certificate_file_path: /etc/test/ca_bundle.pem detailed_metrics: false @@ -58,7 +55,6 @@ exporters: - Service - - ClusterName - Namespace - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -70,7 +66,6 @@ exporters: - - ClusterName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_number_of_container_restarts - dimensions: @@ -78,7 +73,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -87,7 +81,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -98,7 +91,6 @@ exporters: - node_number_of_running_containers - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_usage_total - node_cpu_limit @@ -109,7 +101,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_filesystem_utilization - dimensions: 
@@ -117,23 +108,19 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count - metric_descriptors: [] middleware: agenthealth/logs namespace: ContainerInsights no_verify_ssl: false @@ -151,7 +138,6 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" extensions: agenthealth/logs: @@ -159,21 +145,23 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: false add_full_pod_name_metric_label: false add_service_as_attribute: true @@ -197,24 +185,18 @@ receivers: request_timeout_seconds: 0 resource_arn: "" role_arn: "" - shared_credentials_file: [] tcplog/emf_logs: - attributes: {} encoding: utf-8 id: tcp_input listen_address: 0.0.0.0:25888 operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: tcp_input udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 0.0.0.0:25888 @@ -223,14 +205,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ 
-257,10 +236,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -269,8 +245,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 2d8346fe1b..a3229236cb 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-east-1 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,14 +14,17 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -54,8 +54,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -67,8 +65,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 8ad39a0b44..2dd4256f61 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: 
awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,14 +14,17 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -56,8 +56,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -69,8 +67,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 0ef1e140fa..43ee813bf5 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,7 +14,9 @@ extensions: stats: operations: - PutMetricData -processors: {} + usage_flags: + mode: EC2 + region_type: ACJ receivers: telegraf_socket_listener: collection_interval: 1m0s @@ -39,8 +38,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -52,8 
+49,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 7899904400..71398a6a6c 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatch: endpoint_override: https://monitoring-fips.us-west-2.amazonaws.com @@ -6,10 +5,8 @@ exporters: max_datums_per_call: 1000 max_values_per_datum: 5000 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -50,15 +47,11 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsxray: - aws_log_groups: [] certificate_file_path: "" endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com imds_retries: 1 index_all_attributes: false - indexed_attributes: [] local_mode: true max_retries: 2 middleware: agenthealth/traces @@ -70,7 +63,6 @@ exporters: request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - shared_credentials_file: [] telemetry: enabled: true include_metadata: true @@ -80,26 +72,33 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ agenthealth/metrics: is_usage_data_enabled: true stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 1m0s batch/xray: metadata_cardinality_limit: 1000 - 
metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 200ms @@ -109,20 +108,17 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 refresh_interval_seconds: 0s transform: @@ -131,59 +127,60 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" trace_statements: [] receivers: awsxray: + dialer: + timeout: "0s" endpoint: 0.0.0.0:2001 proxy_server: aws_endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com + certificate_file_path: "" + dialer: + timeout: "0s" endpoint: 0.0.0.0:1234 + imds_retries: 1 local_mode: true + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 role_arn: trace_role_arn_value_test + service_name: "xray" tls: ca_file: "" - ca_pem: '[REDACTED]' cert_file: "" - cert_pem: '[REDACTED]' + include_system_ca_certs_pool: false insecure: true insecure_skip_verify: false key_file: "" - key_pem: '[REDACTED]' max_version: "" min_version: "" reload_interval: 0s server_name_override: "" transport: udp - otlp: + otlp/traces: protocols: grpc: - auth: null + dialer: + timeout: "0s" endpoint: 0.0.0.0:1111 include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - 
tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:2222 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces telegraf_cpu: collection_interval: 10s @@ -231,7 +228,6 @@ receivers: initial_delay: 1s timeout: 0s udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 127.0.0.1:25888 @@ -240,14 +236,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -269,15 +262,15 @@ service: - ec2tagger - transform receivers: + - telegraf_swap + - telegraf_processes - telegraf_mem + - telegraf_netstat - telegraf_cpu - - telegraf_processes - - telegraf_swap - telegraf_statsd - - telegraf_disk - telegraf_procstat/1917393364 - telegraf_socket_listener - - telegraf_netstat + - telegraf_disk metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -286,8 +279,8 @@ service: - ec2tagger - transform receivers: - - telegraf_diskio - telegraf_net + - telegraf_diskio traces/xray: exporters: - awsxray @@ -295,15 +288,13 @@ service: - batch/xray receivers: - awsxray - - otlp + - otlp/traces telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: debug output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -315,8 +306,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index f05a5c1134..db528eb469 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ 
b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatch: drop_original_metrics: @@ -9,10 +8,8 @@ exporters: max_datums_per_call: 1000 max_values_per_datum: 5000 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -53,15 +50,11 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsxray: - aws_log_groups: [] certificate_file_path: "" endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com imds_retries: 1 index_all_attributes: false - indexed_attributes: [] local_mode: true max_retries: 2 middleware: agenthealth/traces @@ -73,7 +66,6 @@ exporters: request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - shared_credentials_file: [] telemetry: enabled: true include_metadata: true @@ -83,26 +75,33 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ agenthealth/metrics: is_usage_data_enabled: true stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 1m0s batch/xray: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 200ms @@ -112,20 +111,17 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 
refresh_interval_seconds: 0s transform: @@ -142,51 +138,52 @@ processors: trace_statements: [] receivers: awsxray: + dialer: + timeout: "0s" endpoint: 0.0.0.0:2001 proxy_server: aws_endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com + certificate_file_path: "" + dialer: + timeout: "0s" endpoint: 0.0.0.0:1234 + imds_retries: 1 local_mode: true + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 role_arn: trace_role_arn_value_test + service_name: "xray" tls: ca_file: "" - ca_pem: '[REDACTED]' cert_file: "" - cert_pem: '[REDACTED]' + include_system_ca_certs_pool: false insecure: true insecure_skip_verify: false key_file: "" - key_pem: '[REDACTED]' max_version: "" min_version: "" reload_interval: 0s server_name_override: "" transport: udp - otlp: + otlp/traces: protocols: grpc: - auth: null + dialer: + timeout: "0s" endpoint: 0.0.0.0:1111 include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:2222 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces telegraf_cpu: collection_interval: 10s @@ -234,7 +231,6 @@ receivers: initial_delay: 1s timeout: 0s udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 127.0.0.1:25888 @@ -243,14 +239,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -272,15 +265,15 @@ service: - ec2tagger - transform receivers: - - telegraf_mem - - telegraf_socket_listener - - telegraf_processes - - telegraf_procstat/1917393364 - - telegraf_disk - telegraf_swap + - telegraf_processes + - telegraf_mem - telegraf_netstat - telegraf_cpu - 
telegraf_statsd + - telegraf_procstat/1917393364 + - telegraf_socket_listener + - telegraf_disk metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -298,15 +291,13 @@ service: - batch/xray receivers: - awsxray - - otlp + - otlp/traces telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: error output_paths: - /tmp/fake/log/hotdog.log @@ -318,8 +309,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 2d069ca631..eb55ab5b2a 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatch: endpoint_override: https://monitoring-fips.us-west-2.amazonaws.com @@ -6,10 +5,8 @@ exporters: max_datums_per_call: 1000 max_values_per_datum: 5000 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true role_arn: metrics_role_arn_value_test @@ -50,15 +47,11 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsxray: - aws_log_groups: [] certificate_file_path: "" endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com imds_retries: 1 index_all_attributes: false - indexed_attributes: [] local_mode: true max_retries: 2 middleware: agenthealth/traces @@ -70,7 +63,6 @@ exporters: request_timeout_seconds: 30 resource_arn: arn:aws:iam::account:resource role_arn: trace_role_arn_value_test - shared_credentials_file: [] telemetry: enabled: true include_metadata: true @@ -80,26 +72,33 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ 
agenthealth/metrics: is_usage_data_enabled: true stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ agenthealth/traces: is_usage_data_enabled: true stats: operations: - PutTraceSegments + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 1m0s batch/xray: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 200ms @@ -126,50 +125,51 @@ processors: receivers: awsxray: endpoint: 0.0.0.0:2001 + dialer: + timeout: "0s" proxy_server: aws_endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com + dialer: + timeout: "0s" + certificate_file_path: "" endpoint: 0.0.0.0:1234 + imds_retries: 1 local_mode: true + profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 + service_name: "xray" role_arn: trace_role_arn_value_test tls: ca_file: "" - ca_pem: "[REDACTED]" cert_file: "" - cert_pem: "[REDACTED]" + include_system_ca_certs_pool: false insecure: true insecure_skip_verify: false key_file: "" - key_pem: "[REDACTED]" max_version: "" min_version: "" reload_interval: 0s server_name_override: "" transport: udp - otlp: + otlp/traces: protocols: grpc: - auth: null endpoint: 0.0.0.0:1111 + dialer: + timeout: "0s" include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 0.0.0.0:2222 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces telegraf_nvidia_smi: collection_interval: 1m0s @@ -215,7 +215,6 @@ receivers: initial_delay: "1s" timeout: 0s udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 127.0.0.1:25888 @@ -224,14 +223,11 @@ receivers: line_start_pattern: "" 
omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -269,15 +265,13 @@ service: - batch/xray receivers: - awsxray - - otlp + - otlp/traces telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: debug output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -289,8 +283,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index 3652d351a4..6ce8ebb230 100644 --- a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatchlogs/emf_logs: certificate_file_path: "" @@ -31,39 +30,34 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] extensions: agenthealth/logs: is_usage_data_enabled: true stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s receivers: tcplog/emf_logs: - attributes: {} encoding: utf-8 id: tcp_input listen_address: 0.0.0.0:25888 operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: tcp_input udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 0.0.0.0:25888 @@ -72,14 +66,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false 
initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -99,8 +90,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -112,8 +101,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 65ae59136d..9cdb77edee 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-east-1 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -24,20 +24,17 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 1 refresh_interval_seconds: 0s transform: @@ -77,8 +74,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -90,8 +85,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - 
propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index a27fe76dc5..f06c994620 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-east-1 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,25 +14,24 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: cumulativetodelta/hostDeltaMetrics: exclude: match_type: "" - metrics: [] - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -61,8 +57,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -74,8 +68,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index a305e493d6..286bdf2e93 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatch: drop_original_metrics: @@ -10,10 +9,8 @@ exporters: max_datums_per_call: 1000 max_values_per_datum: 150 middleware: 
agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -22,14 +19,17 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - ImageId - InstanceId - InstanceType + - ImageId imds_retries: 1 refresh_interval_seconds: 0s transform: @@ -75,8 +75,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -88,8 +86,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.json b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.json index 8e1ffdbbf9..5581444869 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.json +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.json @@ -10,7 +10,8 @@ "cluster_name": "TestCluster", "metrics_collection_interval": 30, "disable_metric_extraction": true, - "enhanced_container_insights": true + "enhanced_container_insights": true, + "accelerated_compute_metrics": false } }, "force_flush_interval": 5, diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 2c7f7b4b18..c72450e249 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -1,11 +1,10 @@ -connectors: {} exporters: awscloudwatchlogs/emf_logs: certificate_file_path: "" emf_only: true endpoint: https://fake_endpoint imds_retries: 2 - local_mode: false + local_mode: true log_group_name: emf/logs/default 
log_retention: 0 log_stream_name: host_name_from_env @@ -31,7 +30,6 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null shared_credentials_file: - /root/.aws/credentials awsemf/containerinsights: @@ -43,7 +41,7 @@ exporters: endpoint: https://fake_endpoint enhanced_container_insights: true imds_retries: 2 - local_mode: false + local_mode: true log_group_name: /aws/containerinsights/{ClusterName}/performance log_retention: 0 log_stream_name: '{NodeName}' @@ -60,7 +58,6 @@ exporters: - ContainerName - Namespace - PodName - label_matchers: [] metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit @@ -88,7 +85,6 @@ exporters: - FullPodName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -107,7 +103,6 @@ exporters: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - pod_interface_network_rx_dropped - pod_interface_network_tx_dropped @@ -123,7 +118,6 @@ exporters: - - ClusterName - Namespace - Service - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -155,7 +149,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -181,7 +174,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_interface_network_rx_dropped - node_interface_network_tx_dropped @@ -192,7 +184,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_filesystem_utilization - node_filesystem_inodes @@ -202,7 +193,6 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: @@ -210,7 +200,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - 
replicas_desired - replicas_ready @@ -221,7 +210,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - daemonset_status_number_available - daemonset_status_number_unavailable @@ -229,12 +217,10 @@ exporters: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count @@ -243,7 +229,6 @@ exporters: - - ClusterName - endpoint - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes @@ -252,7 +237,6 @@ exporters: - - ClusterName - resource - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_list_duration_seconds - apiserver_longrunning_requests @@ -261,7 +245,6 @@ exporters: - - ClusterName - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_duration_seconds - rest_client_request_duration_seconds @@ -270,7 +253,6 @@ exporters: - code - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_total - apiserver_request_total_5xx @@ -278,7 +260,6 @@ exporters: - - ClusterName - operation - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_controller_admission_duration_seconds - apiserver_admission_step_admission_duration_seconds @@ -288,14 +269,12 @@ exporters: - code - method - - ClusterName - label_matchers: [] metric_name_selectors: - rest_client_requests_total - dimensions: - - ClusterName - request_kind - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_current_inflight_requests - apiserver_current_inqueue_requests @@ -303,28 +282,24 @@ exporters: - - ClusterName - name - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_webhook_admission_duration_seconds - dimensions: - - ClusterName - group - 
- ClusterName - label_matchers: [] metric_name_selectors: - apiserver_requested_deprecated_apis - dimensions: - - ClusterName - reason - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_rejected_requests_total - dimensions: - - ClusterName - priority_level - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_request_concurrency_limit metric_descriptors: @@ -414,16 +389,17 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: OP + region_type: ACJ processors: batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s @@ -433,14 +409,13 @@ processors: aggregation_type: "" experimental_match_labels: code: ^5.* - group_resource_labels: {} include: apiserver_request_total match_type: regexp new_name: apiserver_request_total_5xx - operations: [] submatch_case: "" receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: true add_full_pod_name_metric_label: true add_service_as_attribute: true @@ -453,7 +428,7 @@ receivers: imds_retries: 2 leader_lock_name: cwagent-clusterleader leader_lock_using_config_map_only: true - local_mode: false + local_mode: true max_retries: 0 no_verify_ssl: false num_workers: 0 @@ -467,22 +442,17 @@ receivers: shared_credentials_file: - /root/.aws/credentials tcplog/emf_logs: - attributes: {} encoding: utf-8 id: tcp_input listen_address: 0.0.0.0:25888 operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: tcp_input udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 0.0.0.0:25888 @@ -491,14 +461,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - 
resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -526,10 +493,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -538,8 +502,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.conf b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.conf new file mode 100644 index 0000000000..007bb60efb --- /dev/null +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "60s" + logfile = "" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.json b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.json new file mode 100644 index 0000000000..8e1ffdbbf9 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.json @@ -0,0 +1,19 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "metrics_collected": { + "emf": { + }, + "kubernetes": { + "cluster_name": "TestCluster", + "metrics_collection_interval": 30, + "disable_metric_extraction": true, + "enhanced_container_insights": true + } + }, + 
"force_flush_interval": 5, + "endpoint_override":"https://fake_endpoint" + } +} diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml new file mode 100644 index 0000000000..f8137e88c9 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml @@ -0,0 +1,1185 @@ +exporters: + awscloudwatchlogs/emf_logs: + certificate_file_path: "" + emf_only: true + endpoint: https://fake_endpoint + imds_retries: 2 + local_mode: true + log_group_name: emf/logs/default + log_retention: 0 + log_stream_name: host_name_from_env + max_retries: 2 + middleware: agenthealth/logs + no_verify_ssl: false + num_workers: 8 + profile: default + proxy_address: "" + raw_log: true + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + role_arn: "" + sending_queue: + enabled: true + num_consumers: 1 + queue_size: 1000 + shared_credentials_file: + - /root/.aws/credentials + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: https://fake_endpoint + enhanced_container_insights: true + imds_retries: 2 + local_mode: true + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - Namespace + - PodName + metric_name_selectors: + - container_cpu_utilization + - container_cpu_utilization_over_container_limit + - container_cpu_limit + - container_cpu_request + - container_memory_utilization + 
- container_memory_utilization_over_container_limit + - container_memory_failures_total + - container_memory_limit + - container_memory_request + - container_filesystem_usage + - container_filesystem_available + - container_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - - ClusterName + - FullPodName + - Namespace + - PodName + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - PodName + - - ClusterName + - Namespace + - - ClusterName + metric_name_selectors: + - pod_interface_network_rx_dropped + - pod_interface_network_tx_dropped + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - Namespace + - Service + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - pod_number_of_container_restarts + - pod_number_of_containers + - pod_number_of_running_containers + - pod_status_ready + - pod_status_scheduled + - pod_status_running + - pod_status_pending + - pod_status_failed + - pod_status_unknown + - pod_status_succeeded + - pod_memory_request + - pod_memory_limit + - pod_cpu_limit + - pod_cpu_request + - pod_container_status_running + - pod_container_status_terminated + - pod_container_status_waiting + - pod_container_status_waiting_reason_crash_loop_back_off + - pod_container_status_waiting_reason_image_pull_error + - pod_container_status_waiting_reason_start_error + - pod_container_status_waiting_reason_create_container_error + - pod_container_status_waiting_reason_create_container_config_error + - 
pod_container_status_terminated_reason_oom_killed + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - node_status_condition_ready + - node_status_condition_disk_pressure + - node_status_condition_memory_pressure + - node_status_condition_pid_pressure + - node_status_condition_network_unavailable + - node_status_condition_unknown + - node_status_capacity_pods + - node_status_allocatable_pods + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_interface_network_rx_dropped + - node_interface_network_tx_dropped + - node_diskio_io_service_bytes_total + - node_diskio_io_serviced_total + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_filesystem_utilization + - node_filesystem_inodes + - node_filesystem_inodes_free + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + metric_name_selectors: + - replicas_desired + - replicas_ready + - status_replicas_available + - status_replicas_unavailable + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + metric_name_selectors: + - daemonset_status_number_available + - daemonset_status_number_unavailable + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + - 
cluster_number_of_running_pods + - dimensions: + - - ClusterName + - endpoint + - - ClusterName + metric_name_selectors: + - apiserver_storage_size_bytes + - apiserver_storage_db_total_size_in_bytes + - etcd_db_total_size_in_bytes + - dimensions: + - - ClusterName + - resource + - - ClusterName + metric_name_selectors: + - apiserver_storage_list_duration_seconds + - apiserver_longrunning_requests + - apiserver_storage_objects + - dimensions: + - - ClusterName + - verb + - - ClusterName + metric_name_selectors: + - apiserver_request_duration_seconds + - rest_client_request_duration_seconds + - dimensions: + - - ClusterName + - code + - verb + - - ClusterName + metric_name_selectors: + - apiserver_request_total + - apiserver_request_total_5xx + - dimensions: + - - ClusterName + - operation + - - ClusterName + metric_name_selectors: + - apiserver_admission_controller_admission_duration_seconds + - apiserver_admission_step_admission_duration_seconds + - etcd_request_duration_seconds + - dimensions: + - - ClusterName + - code + - method + - - ClusterName + metric_name_selectors: + - rest_client_requests_total + - dimensions: + - - ClusterName + - request_kind + - - ClusterName + metric_name_selectors: + - apiserver_current_inflight_requests + - apiserver_current_inqueue_requests + - dimensions: + - - ClusterName + - name + - - ClusterName + metric_name_selectors: + - apiserver_admission_webhook_admission_duration_seconds + - dimensions: + - - ClusterName + - group + - - ClusterName + metric_name_selectors: + - apiserver_requested_deprecated_apis + - dimensions: + - - ClusterName + - reason + - - ClusterName + metric_name_selectors: + - apiserver_flowcontrol_rejected_requests_total + - dimensions: + - - ClusterName + - priority_level + - - ClusterName + metric_name_selectors: + - apiserver_flowcontrol_request_concurrency_limit + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - Namespace + - PodName + - - ClusterName + - ContainerName + - 
FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - GpuDevice + - Namespace + - PodName + metric_name_selectors: + - container_gpu_utilization + - container_gpu_memory_utilization + - container_gpu_memory_total + - container_gpu_memory_used + - container_gpu_power_draw + - container_gpu_temperature + - dimensions: + - - ClusterName + - - ClusterName + - Namespace + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - FullPodName + - GpuDevice + - Namespace + - PodName + metric_name_selectors: + - pod_gpu_utilization + - pod_gpu_memory_utilization + - pod_gpu_memory_total + - pod_gpu_memory_used + - pod_gpu_power_draw + - pod_gpu_temperature + - dimensions: + - - ClusterName + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + - GpuDevice + - InstanceId + - InstanceType + - NodeName + metric_name_selectors: + - node_gpu_utilization + - node_gpu_memory_utilization + - node_gpu_memory_total + - node_gpu_memory_used + - node_gpu_power_draw + - node_gpu_temperature + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + metric_name_selectors: + - node_gpu_total + - node_gpu_request + - node_gpu_limit + - dimensions: + - - ClusterName + metric_name_selectors: + - cluster_gpu_request + - cluster_gpu_total + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - NeuronCore + - NeuronDevice + - PodName + metric_name_selectors: + - container_neuroncore_utilization + - container_neuroncore_memory_usage_total + - container_neuroncore_memory_usage_constants + - container_neuroncore_memory_usage_model_code + - container_neuroncore_memory_usage_model_shared_scratchpad + - 
container_neuroncore_memory_usage_runtime_memory + - container_neuroncore_memory_usage_tensors + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - NeuronDevice + - PodName + metric_name_selectors: + - container_neurondevice_hw_ecc_events_total + - dimensions: + - - ClusterName + - - ClusterName + - Namespace + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - NeuronCore + - NeuronDevice + - PodName + metric_name_selectors: + - pod_neuroncore_utilization + - pod_neuroncore_memory_usage_total + - pod_neuroncore_memory_usage_constants + - pod_neuroncore_memory_usage_model_code + - pod_neuroncore_memory_usage_model_shared_scratchpad + - pod_neuroncore_memory_usage_runtime_memory + - pod_neuroncore_memory_usage_tensors + - dimensions: + - - ClusterName + - - ClusterName + - Namespace + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - NeuronDevice + - PodName + metric_name_selectors: + - pod_neurondevice_hw_ecc_events_total + - dimensions: + - - ClusterName + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + - InstanceId + - InstanceType + - NeuronCore + - NeuronDevice + - NodeName + metric_name_selectors: + - node_neuroncore_utilization + - node_neuroncore_memory_usage_total + - node_neuroncore_memory_usage_constants + - node_neuroncore_memory_usage_model_code + - node_neuroncore_memory_usage_model_shared_scratchpad + - node_neuroncore_memory_usage_runtime_memory + - node_neuroncore_memory_usage_tensors + - dimensions: + - - ClusterName + - - ClusterName + - InstanceId + - 
NodeName + metric_name_selectors: + - node_neuron_execution_errors_total + - node_neurondevice_runtime_memory_used_bytes + - node_neuron_execution_latency + - dimensions: + - - ClusterName + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + - InstanceId + - NeuronDevice + - NodeName + metric_name_selectors: + - node_neurondevice_hw_ecc_events_total + - dimensions: + - - ClusterName + - - ClusterName + - ContainerName + - Namespace + - PodName + - - ClusterName + - ContainerName + - FullPodName + - Namespace + - PodName + metric_name_selectors: + - container_efa_rx_bytes + - container_efa_tx_bytes + - container_efa_rx_dropped + - container_efa_rdma_read_bytes + - container_efa_rdma_write_bytes + - container_efa_rdma_write_recv_bytes + - dimensions: + - - ClusterName + - - ClusterName + - Namespace + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + - PodName + - - ClusterName + - FullPodName + - Namespace + - PodName + metric_name_selectors: + - pod_efa_rx_bytes + - pod_efa_tx_bytes + - pod_efa_rx_dropped + - pod_efa_rdma_read_bytes + - pod_efa_rdma_write_bytes + - pod_efa_rdma_write_recv_bytes + - dimensions: + - - ClusterName + - - ClusterName + - InstanceId + - NodeName + metric_name_selectors: + - node_efa_rx_bytes + - node_efa_tx_bytes + - node_efa_rx_dropped + - node_efa_rdma_read_bytes + - node_efa_rdma_write_bytes + - node_efa_rdma_write_recv_bytes + metric_descriptors: + - metric_name: apiserver_admission_controller_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_step_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_admission_webhook_admission_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_current_inflight_requests + overwrite: true + unit: Count + - metric_name: apiserver_current_inqueue_requests + overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_rejected_requests_total + 
overwrite: true + unit: Count + - metric_name: apiserver_flowcontrol_request_concurrency_limit + overwrite: true + unit: Count + - metric_name: apiserver_longrunning_requests + overwrite: true + unit: Count + - metric_name: apiserver_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_request_total + overwrite: true + unit: Count + - metric_name: apiserver_request_total_5xx + overwrite: true + unit: Count + - metric_name: apiserver_requested_deprecated_apis + overwrite: true + unit: Count + - metric_name: apiserver_storage_objects + overwrite: true + unit: Count + - metric_name: etcd_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_list_duration_seconds + overwrite: true + unit: Seconds + - metric_name: apiserver_storage_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: apiserver_storage_size_bytes + overwrite: true + unit: Bytes + - metric_name: etcd_db_total_size_in_bytes + overwrite: true + unit: Bytes + - metric_name: rest_client_request_duration_seconds + overwrite: true + unit: Seconds + - metric_name: rest_client_requests_total + overwrite: true + unit: Count + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: default + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + version: "0" +extensions: + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + usage_flags: + mode: OP + region_type: ACJ +processors: + batch/containerinsights: + metadata_cardinality_limit: 1000 + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + batch/emf_logs: + 
metadata_cardinality_limit: 1000 + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + gpuattributes/containerinsights: {} + metricstransform/containerinsights: + transforms: + - action: insert + aggregation_type: "" + experimental_match_labels: + code: ^5.* + include: apiserver_request_total + match_type: regexp + new_name: apiserver_request_total_5xx + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED_PERCENT + match_type: "" + new_name: container_gpu_memory_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED_PERCENT + match_type: "" + new_name: pod_gpu_memory_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED_PERCENT + match_type: "" + new_name: node_gpu_memory_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED + match_type: "" + new_name: container_gpu_memory_used + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + 
label_value: "" + new_label: Type + new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED + match_type: "" + new_name: pod_gpu_memory_used + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_USED + match_type: "" + new_name: node_gpu_memory_used + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_TOTAL + match_type: "" + new_name: container_gpu_memory_total + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_TOTAL + match_type: "" + new_name: pod_gpu_memory_total + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: 
"" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_FB_TOTAL + match_type: "" + new_name: node_gpu_memory_total + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_TEMP + match_type: "" + new_name: container_gpu_temperature + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: ContainerGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_TEMP + match_type: "" + new_name: pod_gpu_temperature + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_TEMP + match_type: "" + new_name: node_gpu_temperature + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_POWER_USAGE + match_type: "" + new_name: container_gpu_power_draw + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: ContainerGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_POWER_USAGE + match_type: "" + new_name: pod_gpu_power_draw + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + 
submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_POWER_USAGE + match_type: "" + new_name: node_gpu_power_draw + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_UTIL + match_type: "" + new_name: container_gpu_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: ContainerGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_UTIL + match_type: "" + new_name: pod_gpu_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: PodGPU + submatch_case: "" + - action: insert + aggregation_type: "" + include: DCGM_FI_DEV_GPU_UTIL + match_type: "" + new_name: node_gpu_utilization + operations: + - action: add_label + aggregation_type: "" + experimental_scale: 0 + label: "" + label_value: "" + new_label: Type + new_value: NodeGPU + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_memory_usage_model_shared_scratchpad + match_type: "" + new_name: neuroncore_memory_usage_model_shared_scratchpad + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_memory_usage_tensors + match_type: "" + new_name: neuroncore_memory_usage_tensors + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: hardware_ecc_events_total + match_type: "" + new_name: neurondevice_hw_ecc_events + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: execution_latency_seconds + match_type: "" + new_name: neuron_execution_latency + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + 
include: execution_status_total + match_type: "" + new_name: neuron_execution_status + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuron_runtime_memory_used_bytes + match_type: "" + new_name: neurondevice_runtime_memory_used_bytes + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_memory_usage_model_code + match_type: "" + new_name: neuroncore_memory_usage_model_code + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_memory_usage_runtime_memory + match_type: "" + new_name: neuroncore_memory_usage_runtime_memory + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_utilization_ratio + match_type: "" + new_name: neuroncore_utilization + operations: + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" + submatch_case: "" + - action: update + aggregation_type: "" + include: instance_info + match_type: "" + new_name: instance_info + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuron_hardware + match_type: "" + new_name: neuron_hardware + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: execution_errors_total + match_type: "" + new_name: neuron_execution_errors + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: neuroncore_memory_usage_constants + match_type: "" + new_name: neuroncore_memory_usage_constants + operations: [] + submatch_case: "" +receivers: + awscontainerinsightreceiver: + accelerated_compute_metrics: true + add_container_name_metric_label: true + add_full_pod_name_metric_label: true + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + 
enable_control_plane_metrics: true + endpoint: "" + imds_retries: 2 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: true + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: true + profile: default + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: + - /root/.aws/credentials + tcplog/emf_logs: + encoding: utf-8 + id: tcp_input + listen_address: 0.0.0.0:25888 + operators: [] + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + type: tcp_input + udplog/emf_logs: + encoding: utf-8 + id: udp_input + listen_address: 0.0.0.0:25888 + multiline: + line_end_pattern: .^ + line_start_pattern: "" + omit_pattern: false + operators: [] + retry_on_failure: + enabled: false + initial_interval: 0s + max_elapsed_time: 0s + max_interval: 0s + type: udp_input +service: + extensions: + - agenthealth/logs + pipelines: + logs/emf_logs: + exporters: + - awscloudwatchlogs/emf_logs + processors: + - batch/emf_logs + receivers: + - tcplog/emf_logs + - udplog/emf_logs + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - metricstransform/containerinsights + - gpuattributes/containerinsights + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: {} diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index a45a897c49..e323834f4e 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -1,14 +1,11 @@ 
-connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-east-1 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,10 +14,11 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: - ec2_instance_tag_keys: [] - ec2_metadata_tags: [] imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -42,16 +40,14 @@ service: processors: - ec2tagger receivers: - - telegraf_disk - telegraf_mem + - telegraf_disk telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -63,8 +59,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index efbde6a334..a3229236cb 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-east-1 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: @@ -46,16 +46,14 @@ service: processors: - ec2tagger receivers: - - telegraf_disk - telegraf_mem + - telegraf_disk telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: 
console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -67,8 +65,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.json b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.json index cd10578c71..1552a1b7b4 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.json +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.json @@ -8,7 +8,8 @@ "cluster_name": "TestCluster", "metrics_collection_interval": 30, "disable_metric_extraction": true, - "enhanced_container_insights": true + "enhanced_container_insights": true, + "accelerated_compute_metrics": false } }, "force_flush_interval": 5, diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index fae509497a..e3531c6140 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awsemf/containerinsights: certificate_file_path: "" @@ -9,7 +8,7 @@ exporters: endpoint: https://fake_endpoint enhanced_container_insights: true imds_retries: 1 - local_mode: false + local_mode: true log_group_name: /aws/containerinsights/{ClusterName}/performance log_retention: 0 log_stream_name: '{NodeName}' @@ -26,7 +25,6 @@ exporters: - ContainerName - Namespace - PodName - label_matchers: [] metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit @@ -54,7 +52,6 @@ exporters: - FullPodName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -73,7 +70,6 @@ exporters: - - ClusterName - Namespace - - ClusterName - 
label_matchers: [] metric_name_selectors: - pod_interface_network_rx_dropped - pod_interface_network_tx_dropped @@ -89,7 +85,6 @@ exporters: - - ClusterName - Namespace - Service - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -121,7 +116,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -147,7 +141,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_interface_network_rx_dropped - node_interface_network_tx_dropped @@ -158,7 +151,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_filesystem_utilization - node_filesystem_inodes @@ -168,7 +160,6 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: @@ -176,7 +167,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - replicas_desired - replicas_ready @@ -187,7 +177,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - daemonset_status_number_available - daemonset_status_number_unavailable @@ -195,12 +184,10 @@ exporters: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count @@ -209,7 +196,6 @@ exporters: - - ClusterName - endpoint - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes @@ -218,7 +204,6 @@ exporters: - - ClusterName - resource - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_list_duration_seconds - apiserver_longrunning_requests @@ -227,7 +212,6 @@ exporters: - - 
ClusterName - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_duration_seconds - rest_client_request_duration_seconds @@ -236,7 +220,6 @@ exporters: - code - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_total - apiserver_request_total_5xx @@ -244,7 +227,6 @@ exporters: - - ClusterName - operation - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_controller_admission_duration_seconds - apiserver_admission_step_admission_duration_seconds @@ -254,14 +236,12 @@ exporters: - code - method - - ClusterName - label_matchers: [] metric_name_selectors: - rest_client_requests_total - dimensions: - - ClusterName - request_kind - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_current_inflight_requests - apiserver_current_inqueue_requests @@ -269,28 +249,24 @@ exporters: - - ClusterName - name - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_webhook_admission_duration_seconds - dimensions: - - ClusterName - group - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_requested_deprecated_apis - dimensions: - - ClusterName - reason - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_rejected_requests_total - dimensions: - - ClusterName - priority_level - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_request_concurrency_limit metric_descriptors: @@ -380,10 +356,12 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: OP + region_type: ACJ processors: batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s @@ -393,14 +371,13 @@ processors: aggregation_type: "" experimental_match_labels: code: ^5.* - group_resource_labels: {} include: apiserver_request_total match_type: regexp new_name: apiserver_request_total_5xx - operations: [] 
submatch_case: "" receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: true add_full_pod_name_metric_label: true add_service_as_attribute: true @@ -444,10 +421,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -456,8 +430,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index f84fb84439..824b9f27df 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatchlogs/emf_logs: certificate_file_path: "" @@ -31,8 +30,6 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsemf/containerinsights: certificate_file_path: "" detailed_metrics: false @@ -52,7 +49,6 @@ exporters: - - ClusterName - ContainerInstanceId - InstanceId - label_matchers: [] metric_name_selectors: - instance_cpu_reserved_capacity - instance_cpu_utilization @@ -63,7 +59,6 @@ exporters: - instance_number_of_running_tasks - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - instance_cpu_limit - instance_cpu_reserved_capacity @@ -76,7 +71,6 @@ exporters: - instance_memory_working_set - instance_network_total_bytes - instance_number_of_running_tasks - metric_descriptors: [] middleware: agenthealth/logs namespace: ECS/ContainerInsights no_verify_ssl: false @@ -93,7 +87,6 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" extensions: agenthealth/logs: @@ -101,21 +94,23 @@ extensions: stats: operations: - 
PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: true add_container_name_metric_label: false add_full_pod_name_metric_label: false add_service_as_attribute: true @@ -139,24 +134,18 @@ receivers: request_timeout_seconds: 0 resource_arn: "" role_arn: "" - shared_credentials_file: [] tcplog/emf_logs: - attributes: {} encoding: utf-8 id: tcp_input listen_address: 0.0.0.0:25888 operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: tcp_input udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 0.0.0.0:25888 @@ -165,14 +154,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -199,10 +185,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -211,8 +194,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.json b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.json index 5f5fe1b8d6..eed2cbb8ac 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.json +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.json @@ -9,7 +9,8 @@ "kubernetes": { "cluster_name": 
"TestCluster", "metrics_collection_interval": 30, - "enhanced_container_insights": true + "enhanced_container_insights": true, + "accelerated_compute_metrics": false } }, "logs_collected": { diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index aed1610b64..05b02c06c9 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awscloudwatchlogs/emf_logs: certificate_file_path: "" @@ -31,8 +30,6 @@ exporters: enabled: true num_consumers: 1 queue_size: 1000 - storage: null - shared_credentials_file: [] awsemf/containerinsights: certificate_file_path: "" detailed_metrics: false @@ -59,7 +56,6 @@ exporters: - ContainerName - Namespace - PodName - label_matchers: [] metric_name_selectors: - container_cpu_utilization - container_cpu_utilization_over_container_limit @@ -87,7 +83,6 @@ exporters: - FullPodName - Namespace - PodName - label_matchers: [] metric_name_selectors: - pod_cpu_utilization - pod_memory_utilization @@ -106,7 +101,6 @@ exporters: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - pod_interface_network_rx_dropped - pod_interface_network_tx_dropped @@ -122,7 +116,6 @@ exporters: - - ClusterName - Namespace - Service - label_matchers: [] metric_name_selectors: - pod_cpu_reserved_capacity - pod_memory_reserved_capacity @@ -154,7 +147,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_cpu_utilization - node_memory_utilization @@ -180,7 +172,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - node_interface_network_rx_dropped - node_interface_network_tx_dropped @@ -191,7 +182,6 @@ exporters: - InstanceId - NodeName - - ClusterName - label_matchers: [] metric_name_selectors: - 
node_filesystem_utilization - node_filesystem_inodes @@ -201,7 +191,6 @@ exporters: - Namespace - Service - - ClusterName - label_matchers: [] metric_name_selectors: - service_number_of_running_pods - dimensions: @@ -209,7 +198,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - replicas_desired - replicas_ready @@ -220,7 +208,6 @@ exporters: - Namespace - PodName - - ClusterName - label_matchers: [] metric_name_selectors: - daemonset_status_number_available - daemonset_status_number_unavailable @@ -228,12 +215,10 @@ exporters: - - ClusterName - Namespace - - ClusterName - label_matchers: [] metric_name_selectors: - namespace_number_of_running_pods - dimensions: - - ClusterName - label_matchers: [] metric_name_selectors: - cluster_node_count - cluster_failed_node_count @@ -242,7 +227,6 @@ exporters: - - ClusterName - endpoint - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_size_bytes - apiserver_storage_db_total_size_in_bytes @@ -251,7 +235,6 @@ exporters: - - ClusterName - resource - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_storage_list_duration_seconds - apiserver_longrunning_requests @@ -260,7 +243,6 @@ exporters: - - ClusterName - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_duration_seconds - rest_client_request_duration_seconds @@ -269,7 +251,6 @@ exporters: - code - verb - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_request_total - apiserver_request_total_5xx @@ -277,7 +258,6 @@ exporters: - - ClusterName - operation - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_controller_admission_duration_seconds - apiserver_admission_step_admission_duration_seconds @@ -287,14 +267,12 @@ exporters: - code - method - - ClusterName - label_matchers: [] metric_name_selectors: - rest_client_requests_total - dimensions: - - ClusterName - request_kind - - ClusterName - 
label_matchers: [] metric_name_selectors: - apiserver_current_inflight_requests - apiserver_current_inqueue_requests @@ -302,28 +280,24 @@ exporters: - - ClusterName - name - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_admission_webhook_admission_duration_seconds - dimensions: - - ClusterName - group - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_requested_deprecated_apis - dimensions: - - ClusterName - reason - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_rejected_requests_total - dimensions: - - ClusterName - priority_level - - ClusterName - label_matchers: [] metric_name_selectors: - apiserver_flowcontrol_request_concurrency_limit metric_descriptors: @@ -404,7 +378,6 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" extensions: agenthealth/logs: @@ -412,16 +385,17 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/containerinsights: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s batch/emf_logs: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s @@ -431,14 +405,13 @@ processors: aggregation_type: "" experimental_match_labels: code: ^5.* - group_resource_labels: {} include: apiserver_request_total match_type: regexp new_name: apiserver_request_total_5xx - operations: [] submatch_case: "" receivers: awscontainerinsightreceiver: + accelerated_compute_metrics: false add_container_name_metric_label: true add_full_pod_name_metric_label: true add_service_as_attribute: true @@ -462,24 +435,18 @@ receivers: request_timeout_seconds: 0 resource_arn: "" role_arn: "" - shared_credentials_file: [] tcplog/emf_logs: - attributes: {} encoding: utf-8 id: tcp_input listen_address: 0.0.0.0:25888 operators: [] - output: [] - resource: {} 
retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: tcp_input udplog/emf_logs: - attributes: {} encoding: utf-8 id: udp_input listen_address: 0.0.0.0:25888 @@ -488,14 +455,11 @@ receivers: line_start_pattern: "" omit_pattern: false operators: [] - output: [] - resource: {} retry_on_failure: enabled: false initial_interval: 0s max_elapsed_time: 0s max_interval: 0s - storage: null type: udp_input service: extensions: @@ -523,10 +487,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -535,8 +496,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 22b83a7507..83ca491154 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awsemf/prometheus: certificate_file_path: "" @@ -59,7 +58,6 @@ exporters: no_verify_ssl: false num_workers: 8 output_destination: cloudwatch - parse_json_encoded_attr_values: [] profile: "" proxy_address: "" region: us-east-1 @@ -69,7 +67,6 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" extensions: agenthealth/logs: @@ -77,10 +74,12 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/prometheus: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 30s @@ -106,10 +105,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} 
level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -118,8 +114,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index b4d0625cf6..bb0866fc55 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -1,4 +1,3 @@ -connectors: {} exporters: awsemf/prometheus: certificate_file_path: "" @@ -41,7 +40,6 @@ exporters: no_verify_ssl: false num_workers: 8 output_destination: cloudwatch - parse_json_encoded_attr_values: [] profile: "" proxy_address: "" region: us-east-1 @@ -51,7 +49,6 @@ exporters: enabled: true retain_initial_value_of_delta_metric: false role_arn: "" - shared_credentials_file: [] version: "0" extensions: agenthealth/logs: @@ -59,10 +56,12 @@ extensions: stats: operations: - PutLogEvents + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/prometheus: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s @@ -88,10 +87,7 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info - output_paths: [] sampling: enabled: true initial: 2 @@ -100,8 +96,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index 04ee115774..ac1bf370ba 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s 
max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -24,20 +24,17 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceId - InstanceType - ImageId + - InstanceId refresh_interval_seconds: 0s receivers: telegraf_cpu: @@ -88,8 +85,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -101,8 +96,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index cff953db01..415942372b 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -1,15 +1,12 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent profile: AmazonCloudWatchAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true shared_credential_file: fake-path @@ -19,6 +16,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: 
cumulativetodelta/hostDeltaMetrics: exclude: @@ -26,20 +26,17 @@ processors: metrics: - iops_in_progress - diskio_iops_in_progress - regexp: null include: match_type: "" - metrics: [] - regexp: null initial_value: 0 max_staleness: 0s ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 2 profile: AmazonCloudWatchAgent refresh_interval_seconds: 0s @@ -93,8 +90,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -106,8 +101,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index a3ae11b32d..54c873f82d 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,6 +14,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: @@ -62,19 +62,17 @@ service: processors: - ec2tagger receivers: + - telegraf_win_perf_counters/3610923661 - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - - telegraf_win_perf_counters/3610923661 telemetry: logs: development: false disable_caller: false disable_stacktrace: false 
encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -86,8 +84,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 43d0d2016f..830c6ad2ca 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -1,15 +1,12 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent profile: AmazonCloudWatchAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true shared_credential_file: fake-path @@ -19,6 +16,9 @@ extensions: stats: operations: - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ processors: ec2tagger: ec2_instance_tag_keys: @@ -67,19 +67,17 @@ service: processors: - ec2tagger receivers: - - telegraf_win_perf_counters/3446270237 - - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 + - telegraf_win_perf_counters/3762679655 telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -91,8 +89,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git 
a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 900ac2bc12..6e818d628f 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,7 +14,9 @@ extensions: stats: operations: - PutMetricData -processors: {} + usage_flags: + mode: EC2 + region_type: ACJ receivers: telegraf_statsd: collection_interval: 10s @@ -39,8 +38,6 @@ service: disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -52,8 +49,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index 96a8b5c7f5..b0c9488ad8 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -1,14 +1,11 @@ -connectors: {} exporters: awscloudwatch: force_flush_interval: 1m0s max_datums_per_call: 1000 max_values_per_datum: 150 middleware: agenthealth/metrics - mode: EC2 namespace: CWAgent region: us-west-2 - region_type: ACJ resource_to_telemetry_conversion: enabled: true extensions: @@ -17,7 +14,9 @@ extensions: stats: operations: - PutMetricData -processors: {} + usage_flags: + mode: EC2 + region_type: ACJ receivers: telegraf_statsd: collection_interval: 10s @@ -39,8 +38,6 @@ service: disable_caller: false 
disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -52,8 +49,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index b283eff8a0..79dccf9674 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -1,12 +1,9 @@ -connectors: {} exporters: awsxray: - aws_log_groups: [] certificate_file_path: "" endpoint: "" imds_retries: 2 index_all_attributes: false - indexed_attributes: [] local_mode: false max_retries: 2 middleware: agenthealth/traces @@ -29,47 +26,54 @@ extensions: stats: operations: - PutTraceSegments + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/xray: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 200ms receivers: awsxray: endpoint: 127.0.0.1:2000 + dialer: + timeout: "0s" proxy_server: aws_endpoint: "" + certificate_file_path: "" + dialer: + timeout: "0s" endpoint: 127.0.0.1:2000 + imds_retries: 2 local_mode: false + profile: default proxy_address: "" region: us-west-2 role_arn: "" + service_name: "xray" + shared_credentials_file: + - /root/.aws/credentials transport: udp - otlp: + otlp/traces: protocols: grpc: - auth: null + dialer: + timeout: "0s" endpoint: 127.0.0.1:4317 include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null endpoint: 127.0.0.1:4318 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: 
/v1/traces service: extensions: @@ -82,15 +86,13 @@ service: - batch/xray receivers: - awsxray - - otlp + - otlp/traces telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log @@ -102,8 +104,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index f477df8101..f39e70b5e5 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -1,12 +1,9 @@ -connectors: {} exporters: awsxray: - aws_log_groups: [] certificate_file_path: "" endpoint: "" imds_retries: 2 index_all_attributes: false - indexed_attributes: [] local_mode: false max_retries: 2 middleware: agenthealth/traces @@ -29,47 +26,54 @@ extensions: stats: operations: - PutTraceSegments + usage_flags: + mode: EC2 + region_type: ACJ processors: batch/xray: metadata_cardinality_limit: 1000 - metadata_keys: [] send_batch_max_size: 0 send_batch_size: 8192 timeout: 200ms receivers: awsxray: endpoint: 127.0.0.1:2000 + dialer: + timeout: "0s" proxy_server: + dialer: + timeout: "0s" aws_endpoint: "" + certificate_file_path: "" endpoint: 127.0.0.1:2000 + imds_retries: 2 local_mode: false + profile: default proxy_address: "" region: us-west-2 role_arn: "" + service_name: "xray" + shared_credentials_file: + - /root/.aws/credentials transport: udp - otlp: + otlp/traces: protocols: grpc: - auth: null + dialer: + timeout: "0s" endpoint: 127.0.0.1:4317 include_metadata: false - keepalive: null max_concurrent_streams: 0 max_recv_msg_size_mib: 0 read_buffer_size: 524288 - tls: null transport: tcp write_buffer_size: 0 http: - auth: null - cors: null 
endpoint: 127.0.0.1:4318 include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics - response_headers: {} - tls: null traces_url_path: /v1/traces service: extensions: @@ -82,15 +86,13 @@ service: - batch/xray receivers: - awsxray - - otlp + - otlp/traces telemetry: logs: development: false disable_caller: false disable_stacktrace: false encoding: console - error_output_paths: [] - initial_fields: {} level: info output_paths: - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log @@ -102,8 +104,4 @@ service: metrics: address: "" level: None - readers: [] - resource: {} - traces: - processors: [] - propagators: [] + traces: {} diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index 73e6236f17..cbf7ffde39 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" - "k8s.io/client-go/kubernetes/fake" "github.com/aws/amazon-cloudwatch-agent/cfg/commonconfig" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" @@ -39,6 +38,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/util" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) const ( @@ -59,6 +59,7 @@ type testCase struct { func TestBaseContainerInsightsConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") t.Setenv(envconfig.AWS_CA_BUNDLE, "/etc/test/ca_bundle.pem") @@ -70,9 +71,9 @@ func TestBaseContainerInsightsConfig(t *testing.T) { } func TestGenericAppSignalsConfig(t *testing.T) { - 
common.NewDetector = common.TestEKSDetector resetContext(t) - context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetRunInContainer(false) + context.CurrentContext().SetMode(config.ModeOnPremise) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") expectedEnvVars := map[string]string{} @@ -80,22 +81,73 @@ func TestGenericAppSignalsConfig(t *testing.T) { checkTranslation(t, "base_appsignals_config", "windows", expectedEnvVars, "") } -func TestAppSignalsAndKubernetesConfig(t *testing.T) { +func TestGenericAppSignalsFallbackConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(false) + context.CurrentContext().SetMode(config.ModeOnPremise) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + expectedEnvVars := map[string]string{} + checkTranslation(t, "base_appsignals_fallback_config", "linux", expectedEnvVars, "") + checkTranslation(t, "base_appsignals_fallback_config", "windows", expectedEnvVars, "") +} + +func TestAppSignalsAndEKSConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") t.Setenv(common.KubernetesEnvVar, "use_appsignals_eks_config") - common.NewDetector = common.TestEKSDetector + eksdetector.NewDetector = eksdetector.TestEKSDetector + context.CurrentContext().SetMode(config.ModeEC2) + context.CurrentContext().SetKubernetesMode(config.ModeEKS) expectedEnvVars := map[string]string{} checkTranslation(t, "appsignals_and_eks_config", "linux", expectedEnvVars, "") checkTranslation(t, "appsignals_and_eks_config", "windows", expectedEnvVars, "") +} - common.NewDetector = func() (common.Detector, error) { - return &common.EksDetector{Clientset: fake.NewSimpleClientset()}, nil - } +func TestAppSignalsFallbackAndEKSConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + 
t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + t.Setenv(common.KubernetesEnvVar, "use_appsignals_eks_config") + eksdetector.NewDetector = eksdetector.TestEKSDetector + context.CurrentContext().SetMode(config.ModeEC2) + context.CurrentContext().SetKubernetesMode(config.ModeEKS) + + expectedEnvVars := map[string]string{} + checkTranslation(t, "appsignals_fallback_and_eks_config", "linux", expectedEnvVars, "") + checkTranslation(t, "appsignals_fallback_and_eks_config", "windows", expectedEnvVars, "") +} +func TestAppSignalsFavorOverFallbackConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + t.Setenv(common.KubernetesEnvVar, "use_appsignals_eks_config") + eksdetector.NewDetector = eksdetector.TestEKSDetector + context.CurrentContext().SetMode(config.ModeEC2) + context.CurrentContext().SetKubernetesMode(config.ModeEKS) + + expectedEnvVars := map[string]string{} + checkTranslation(t, "appsignals_over_fallback_config", "linux", expectedEnvVars, "") + checkTranslation(t, "appsignals_over_fallback_config", "windows", expectedEnvVars, "") +} + +func TestAppSignalsAndNativeKubernetesConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + t.Setenv(common.KubernetesEnvVar, "use_appsignals_k8s_config") + eksdetector.IsEKS = eksdetector.TestIsEKSCacheK8s + context.CurrentContext().SetMode(config.ModeEC2) + context.CurrentContext().SetKubernetesMode(config.ModeK8sEC2) + + expectedEnvVars := map[string]string{} checkTranslation(t, "appsignals_and_k8s_config", "linux", expectedEnvVars, "") checkTranslation(t, "appsignals_and_k8s_config", "windows", expectedEnvVars, "") } @@ -104,6 +156,7 @@ func TestEmfAndKubernetesConfig(t *testing.T) { resetContext(t) readCommonConfig(t, 
"./sampleConfig/commonConfig/withCredentials.toml") context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeOnPremise) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") expectedEnvVars := map[string]string{} @@ -111,6 +164,18 @@ func TestEmfAndKubernetesConfig(t *testing.T) { checkTranslation(t, "emf_and_kubernetes_config", "darwin", nil, "") } +func TestEmfAndKubernetesWithGpuConfig(t *testing.T) { + resetContext(t) + readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") + context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeOnPremise) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + expectedEnvVars := map[string]string{} + checkTranslation(t, "emf_and_kubernetes_with_gpu_config", "linux", expectedEnvVars, "") + checkTranslation(t, "emf_and_kubernetes_with_gpu_config", "darwin", nil, "") +} + func TestKubernetesModeOnPremiseConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) @@ -124,6 +189,7 @@ func TestKubernetesModeOnPremiseConfig(t *testing.T) { func TestLogsAndKubernetesConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") // for otel components and not our adapter components like @@ -186,6 +252,7 @@ func TestCollectDConfig(t *testing.T) { func TestPrometheusConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) t.Setenv(config.HOST_NAME, "host_name_from_env") temp := t.TempDir() prometheusConfigFileName := filepath.Join(temp, "prometheus.yaml") @@ -407,6 +474,7 @@ func TestTraceConfig(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { resetContext(t) + 
context.CurrentContext().SetMode(config.ModeEC2) readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") checkTranslation(t, testCase.filename, testCase.targetPlatform, testCase.expectedEnvVars, testCase.appendString) }) @@ -415,6 +483,7 @@ func TestTraceConfig(t *testing.T) { func TestConfigWithEnvironmentVariables(t *testing.T) { resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) expectedEnvVars := map[string]string{} checkTranslation(t, "config_with_env", "linux", expectedEnvVars, "") } @@ -470,6 +539,8 @@ func TestDeltaNetConfigLinux(t *testing.T) { func TestECSNodeMetricConfig(t *testing.T) { resetContext(t) + context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) t.Setenv("RUN_IN_CONTAINER", "True") t.Setenv("HOST_NAME", "fake-host-name") t.Setenv("HOST_IP", "127.0.0.1") diff --git a/translator/translate/metrics/util/commonconfigutil.go b/translator/translate/metrics/util/commonconfigutil.go index b7fcc3a80c..770aabfb8d 100755 --- a/translator/translate/metrics/util/commonconfigutil.go +++ b/translator/translate/metrics/util/commonconfigutil.go @@ -54,8 +54,7 @@ func ProcessLinuxCommonConfig(input interface{}, pluginName string, path string, // Set append_dimensions as tags if val, ok := inputMap[Append_Dimensions_Key]; ok { - result[Append_Dimensions_Mapped_Key] = val - util.Cleanup(val) + result[Append_Dimensions_Mapped_Key] = util.FilterReservedKeys(val) } // Apply any specific rules for the plugin diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index 1e1bef0e73..88c3c27e83 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -45,7 +45,9 @@ const ( ContainerInsightsMetricGranularity = "metric_granularity" // replaced with enhanced_container_insights EnhancedContainerInsights = "enhanced_container_insights" PreferFullPodName = "prefer_full_pod_name" + 
EnableAcceleratedComputeMetric = "accelerated_compute_metrics" Console = "console" + DiskKey = "disk" DiskIOKey = "diskio" NetKey = "net" Emf = "emf" @@ -53,6 +55,7 @@ const ( ServiceAddress = "service_address" Udp = "udp" Tcp = "tcp" + TlsKey = "tls" Region = "region" LogGroupName = "log_group_name" LogStreamName = "log_stream_name" @@ -62,17 +65,20 @@ const ( PipelineNameHost = "host" PipelineNameHostDeltaMetrics = "hostDeltaMetrics" PipelineNameEmfLogs = "emf_logs" - AppSignals = "app_signals" + AppSignals = "application_signals" + AppSignalsFallback = "app_signals" AppSignalsRules = "rules" ) var ( - AppSignalsTraces = ConfigKey(TracesKey, TracesCollectedKey, AppSignals) - AppSignalsMetrics = ConfigKey(LogsKey, MetricsCollectedKey, AppSignals) - - AppSignalsConfigKeys = map[component.DataType]string{ - component.DataTypeTraces: AppSignalsTraces, - component.DataTypeMetrics: AppSignalsMetrics, + AppSignalsTraces = ConfigKey(TracesKey, TracesCollectedKey, AppSignals) + AppSignalsMetrics = ConfigKey(LogsKey, MetricsCollectedKey, AppSignals) + AppSignalsTracesFallback = ConfigKey(TracesKey, TracesCollectedKey, AppSignalsFallback) + AppSignalsMetricsFallback = ConfigKey(LogsKey, MetricsCollectedKey, AppSignalsFallback) + + AppSignalsConfigKeys = map[component.DataType][]string{ + component.DataTypeTraces: {AppSignalsTraces, AppSignalsTracesFallback}, + component.DataTypeMetrics: {AppSignalsMetrics, AppSignalsMetricsFallback}, } ) @@ -230,8 +236,14 @@ func GetString(conf *confmap.Conf, key string) (string, bool) { // the return value will be nil func GetArray[C any](conf *confmap.Conf, key string) []C { if value := conf.Get(key); value != nil { - got, _ := value.([]C) - return got + var arr []C + got, _ := value.([]any) + for _, entry := range got { + if t, ok := entry.(C); ok { + arr = append(arr, t) + } + } + return arr } return nil } diff --git a/translator/translate/otel/common/common_test.go b/translator/translate/otel/common/common_test.go index 
c087e9df85..528c65a875 100644 --- a/translator/translate/otel/common/common_test.go +++ b/translator/translate/otel/common/common_test.go @@ -48,7 +48,10 @@ func TestGetString(t *testing.T) { } func TestGetArray(t *testing.T) { - conf := confmap.NewFromStringMap(map[string]interface{}{"int": []int{5, 8, 10}, "string": []string{"bool", "empty"}}) + conf := confmap.NewFromStringMap(map[string]any{ + "int": []any{5, 8, 10}, + "string": []any{"bool", "empty"}, + }) gotInt := GetArray[int](conf, "int") require.Equal(t, []int{5, 8, 10}, gotInt) @@ -168,26 +171,30 @@ func TestParseDuration(t *testing.T) { } func TestTranslatorMap(t *testing.T) { - got := NewTranslatorMap[int](&testTranslator{"first", 0}, &testTranslator{"middle", 1}) + firstType, _ := component.NewType("first") + middleType, _ := component.NewType("middle") + lastType, _ := component.NewType("last") + got := NewTranslatorMap[int](&testTranslator{firstType, 0}, &testTranslator{middleType, 1}) require.Equal(t, 2, got.Len()) - translator, ok := got.Get(component.NewID("first")) + translator, ok := got.Get(component.NewID(firstType)) require.True(t, ok) result, err := translator.Translate(nil) require.NoError(t, err) require.Equal(t, 0, result) - other := NewTranslatorMap[int](&testTranslator{"first", 2}, &testTranslator{"last", 3}) + other := NewTranslatorMap[int](&testTranslator{firstType, 2}, &testTranslator{lastType, 3}) got.Merge(other) require.Equal(t, 3, got.Len()) - translator, ok = got.Get(component.NewID("first")) + translator, ok = got.Get(component.NewID(firstType)) require.True(t, ok) result, err = translator.Translate(nil) require.NoError(t, err) require.Equal(t, 2, result) - require.Equal(t, []component.ID{component.NewID("first"), component.NewID("middle"), component.NewID("last")}, got.Keys()) + require.Equal(t, []component.ID{component.NewID(firstType), component.NewID(middleType), component.NewID(lastType)}, got.Keys()) } func TestMissingKeyError(t *testing.T) { - err := 
&MissingKeyError{ID: component.NewID("type"), JsonKey: "key"} + newType, _ := component.NewType("type") + err := &MissingKeyError{ID: component.NewID(newType), JsonKey: "key"} require.Equal(t, "\"type\" missing key in JSON: \"key\"", err.Error()) } diff --git a/translator/translate/otel/exporter/awscloudwatch/translator.go b/translator/translate/otel/exporter/awscloudwatch/translator.go index 839353a634..b8291623b9 100644 --- a/translator/translate/otel/exporter/awscloudwatch/translator.go +++ b/translator/translate/otel/exporter/awscloudwatch/translator.go @@ -12,7 +12,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/metric" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" - "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/config" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/rollup_dimensions" @@ -59,8 +58,6 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { _ = credentials.Unmarshal(cfg) cfg.RoleARN = getRoleARN(conf) cfg.Region = agent.Global_Config.Region - cfg.RegionType = agent.Global_Config.RegionType - cfg.Mode = context.CurrentContext().ShortMode() if namespace, ok := common.GetString(conf, common.ConfigKey(common.MetricsKey, namespaceKey)); ok { cfg.Namespace = namespace } diff --git a/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml b/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml index 7171c91dac..036c519717 100644 --- a/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml +++ b/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml @@ -1,37 +1,37 @@ -log_group_name: "/aws/appsignals/eks" -namespace: "AppSignals" +log_group_name: "/aws/application-signals/data" +namespace: "ApplicationSignals" middleware: agenthealth/logs dimension_rollup_option: 
"NoDimensionRollup" metric_declarations: - dimensions: - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service] + - [Environment, Service, Operation] + - [Environment, Service] label_matchers: - label_names: - - aws.span.kind - regex: '^(SERVER|LOCAL_ROOT)$' + - Telemetry.Source + regex: '^(ServerSpan|LocalRootSpan)$' metric_name_selectors: - Latency - Fault - Error - dimensions: - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, RemoteTarget] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, K8s.RemoteNamespace] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, RemoteTarget] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation] - - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteTarget] - - [RemoteService, RemoteTarget] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteEnvironment, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteEnvironment] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteResourceIdentifier, 
RemoteResourceType] + - [Environment, Service, Operation, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService, RemoteEnvironment] + - [Environment, Service, RemoteService] + - [Environment, Service, RemoteService, RemoteOperation, RemoteEnvironment, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, RemoteService, RemoteOperation, RemoteEnvironment] + - [Environment, Service, RemoteService, RemoteOperation, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService, RemoteResourceIdentifier, RemoteResourceType] + - [RemoteService, RemoteResourceIdentifier, RemoteResourceType] - [RemoteService] label_matchers: - label_names: - - aws.span.kind - regex: '^(CLIENT|PRODUCER|CONSUMER)$' + - Telemetry.Source + regex: '^(ClientSpan|ProducerSpan|ConsumerSpan)$' metric_name_selectors: - Latency - Fault diff --git a/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml b/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml index 942faf6d81..735f6df7da 100644 --- a/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml +++ b/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml @@ -1,32 +1,32 @@ -log_group_name: "/aws/appsignals/generic" -namespace: "AppSignals" +log_group_name: "/aws/application-signals/data" +namespace: "ApplicationSignals" middleware: agenthealth/logs dimension_rollup_option: "NoDimensionRollup" metric_declarations: - dimensions: - - [HostedIn.Environment, Service, Operation] - - [HostedIn.Environment, Service] + - [Environment, Service, Operation] + - [Environment, Service] label_matchers: - label_names: - - aws.span.kind - regex: '^(SERVER|LOCAL_ROOT)$' + - Telemetry.Source + regex: '^(ServerSpan|LocalRootSpan)$' metric_name_selectors: - Latency - Fault - Error - dimensions: - - [HostedIn.Environment, Service, Operation, RemoteService, RemoteOperation, 
RemoteTarget] - - [HostedIn.Environment, Service, Operation, RemoteService, RemoteOperation] - - [HostedIn.Environment, Service, RemoteService] - - [HostedIn.Environment, Service, RemoteService, RemoteOperation, RemoteTarget] - - [HostedIn.Environment, Service, RemoteService, RemoteOperation] - - [HostedIn.Environment, Service, RemoteService, RemoteTarget] - - [RemoteService, RemoteTarget] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, Operation, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService] + - [Environment, Service, RemoteService, RemoteOperation, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService, RemoteResourceIdentifier, RemoteResourceType] + - [RemoteService, RemoteResourceIdentifier, RemoteResourceType] - [RemoteService] label_matchers: - label_names: - - aws.span.kind - regex: '^(CLIENT|PRODUCER|CONSUMER)$' + - Telemetry.Source + regex: '^(ClientSpan|ProducerSpan|ConsumerSpan)$' metric_name_selectors: - Latency - Fault diff --git a/translator/translate/otel/exporter/awsemf/appsignals_config_k8s.yaml b/translator/translate/otel/exporter/awsemf/appsignals_config_k8s.yaml index d4bf0794bd..ce85e50f78 100644 --- a/translator/translate/otel/exporter/awsemf/appsignals_config_k8s.yaml +++ b/translator/translate/otel/exporter/awsemf/appsignals_config_k8s.yaml @@ -1,37 +1,37 @@ -log_group_name: "/aws/appsignals/k8s" -namespace: "AppSignals" +log_group_name: "/aws/application-signals/data" +namespace: "ApplicationSignals" middleware: agenthealth/logs dimension_rollup_option: "NoDimensionRollup" metric_declarations: - dimensions: - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, Operation] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service] + - [Environment, Service, Operation] + - [Environment, Service] label_matchers: - label_names: 
- - aws.span.kind - regex: '^(SERVER|LOCAL_ROOT)$' + - Telemetry.Source + regex: ^(ServerSpan|LocalRootSpan)$ metric_name_selectors: - Latency - Fault - Error - dimensions: - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, RemoteTarget] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, K8s.RemoteNamespace] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, RemoteTarget] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation] - - [HostedIn.K8s.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteTarget] - - [RemoteService, RemoteTarget] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteEnvironment, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteEnvironment] + - [Environment, Service, Operation, RemoteService, RemoteOperation, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, Operation, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService, RemoteEnvironment] + - [Environment, Service, RemoteService] + - [Environment, Service, RemoteService, RemoteOperation, RemoteEnvironment, 
RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, RemoteService, RemoteOperation, RemoteEnvironment] + - [Environment, Service, RemoteService, RemoteOperation, RemoteResourceIdentifier, RemoteResourceType] + - [Environment, Service, RemoteService, RemoteOperation] + - [Environment, Service, RemoteService, RemoteResourceIdentifier, RemoteResourceType] + - [RemoteService, RemoteResourceIdentifier, RemoteResourceType] - [RemoteService] label_matchers: - label_names: - - aws.span.kind - regex: '^(CLIENT|PRODUCER|CONSUMER)$' + - Telemetry.Source + regex: '^(ClientSpan|ProducerSpan|ConsumerSpan)$' metric_name_selectors: - Latency - Fault diff --git a/translator/translate/otel/exporter/awsemf/kubernetes.go b/translator/translate/otel/exporter/awsemf/kubernetes.go index 564b0c2c84..c5db4fb26d 100644 --- a/translator/translate/otel/exporter/awsemf/kubernetes.go +++ b/translator/translate/otel/exporter/awsemf/kubernetes.go @@ -46,6 +46,14 @@ func setKubernetesMetricDeclaration(conf *confmap.Conf, cfg *awsemfexporter.Conf // Setup control plane metrics kubernetesMetricDeclarations = append(kubernetesMetricDeclarations, getControlPlaneMetricDeclarations(conf)...) + // Setup GPU metrics + kubernetesMetricDeclarations = append(kubernetesMetricDeclarations, getGPUMetricDeclarations(conf)...) + + // Setup Aws Neuron metrics + kubernetesMetricDeclarations = append(kubernetesMetricDeclarations, getAwsNeuronMetricDeclarations(conf)...) + + kubernetesMetricDeclarations = append(kubernetesMetricDeclarations, getEFAMetricDeclarations(conf)...) 
+ cfg.MetricDeclarations = kubernetesMetricDeclarations cfg.MetricDescriptors = getControlPlaneMetricDescriptors(conf) @@ -457,3 +465,188 @@ func getControlPlaneMetricDescriptors(conf *confmap.Conf) []awsemfexporter.Metri return []awsemfexporter.MetricDescriptor{} } + +func getGPUMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclaration { + var metricDeclarations []*awsemfexporter.MetricDeclaration + enhancedContainerInsightsEnabled := awscontainerinsight.EnhancedContainerInsightsEnabled(conf) + if awscontainerinsight.AcceleratedComputeMetricsEnabled(conf) && enhancedContainerInsightsEnabled { + metricDeclarations = append(metricDeclarations, []*awsemfexporter.MetricDeclaration{ + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "GpuDevice"}}, + MetricNameSelectors: []string{ + "container_gpu_utilization", + "container_gpu_memory_utilization", + "container_gpu_memory_total", + "container_gpu_memory_used", + "container_gpu_power_draw", + "container_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "GpuDevice"}}, + MetricNameSelectors: []string{ + "pod_gpu_utilization", + "pod_gpu_memory_utilization", + "pod_gpu_memory_total", + "pod_gpu_memory_used", + "pod_gpu_power_draw", + "pod_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "NodeName", "InstanceId"}, {"ClusterName", "NodeName", "InstanceId", "InstanceType", "GpuDevice"}}, + MetricNameSelectors: []string{ + "node_gpu_utilization", + "node_gpu_memory_utilization", + "node_gpu_memory_total", + "node_gpu_memory_used", + 
"node_gpu_power_draw", + "node_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName", "NodeName", "InstanceId"}, {"ClusterName"}}, + MetricNameSelectors: []string{ + "node_gpu_total", + "node_gpu_request", + "node_gpu_limit", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}}, + MetricNameSelectors: []string{ + "cluster_gpu_request", + "cluster_gpu_total", + }, + }, + }...) + } + return metricDeclarations +} + +func getAwsNeuronMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclaration { + var metricDeclarations []*awsemfexporter.MetricDeclaration + enhancedContainerInsightsEnabled := awscontainerinsight.EnhancedContainerInsightsEnabled(conf) + if awscontainerinsight.AcceleratedComputeMetricsEnabled(conf) && enhancedContainerInsightsEnabled { + metricDeclarations = append(metricDeclarations, []*awsemfexporter.MetricDeclaration{ + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "container_neuroncore_utilization", + "container_neuroncore_memory_usage_total", + "container_neuroncore_memory_usage_constants", + "container_neuroncore_memory_usage_model_code", + "container_neuroncore_memory_usage_model_shared_scratchpad", + "container_neuroncore_memory_usage_runtime_memory", + "container_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "NeuronDevice"}}, + MetricNameSelectors: []string{ + "container_neurondevice_hw_ecc_events_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", 
"Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "pod_neuroncore_utilization", + "pod_neuroncore_memory_usage_total", + "pod_neuroncore_memory_usage_constants", + "pod_neuroncore_memory_usage_model_code", + "pod_neuroncore_memory_usage_model_shared_scratchpad", + "pod_neuroncore_memory_usage_runtime_memory", + "pod_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "NeuronDevice"}}, + MetricNameSelectors: []string{ + "pod_neurondevice_hw_ecc_events_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}, {"ClusterName", "InstanceType", "InstanceId", "NodeName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "node_neuroncore_utilization", + "node_neuroncore_memory_usage_total", + "node_neuroncore_memory_usage_constants", + "node_neuroncore_memory_usage_model_code", + "node_neuroncore_memory_usage_model_shared_scratchpad", + "node_neuroncore_memory_usage_runtime_memory", + "node_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}}, + MetricNameSelectors: []string{ + "node_neuron_execution_errors_total", + "node_neurondevice_runtime_memory_used_bytes", + "node_neuron_execution_latency", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}, {"ClusterName", "InstanceId", "NodeName", "NeuronDevice"}}, + MetricNameSelectors: []string{ + "node_neurondevice_hw_ecc_events_total", + }, + }, + }...) 
+ } + return metricDeclarations +} + +func getEFAMetricDeclarations(conf *confmap.Conf) []*awsemfexporter.MetricDeclaration { + var metricDeclarations []*awsemfexporter.MetricDeclaration + if awscontainerinsight.EnhancedContainerInsightsEnabled(conf) && awscontainerinsight.AcceleratedComputeMetricsEnabled(conf) { + metricDeclarations = []*awsemfexporter.MetricDeclaration{ + { + Dimensions: [][]string{ + {"ClusterName"}, + {"ClusterName", "Namespace", "PodName", "ContainerName"}, + {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, + }, + MetricNameSelectors: []string{ + "container_efa_rx_bytes", + "container_efa_tx_bytes", + "container_efa_rx_dropped", + "container_efa_rdma_read_bytes", + "container_efa_rdma_write_bytes", + "container_efa_rdma_write_recv_bytes", + }, + }, + { + Dimensions: [][]string{ + {"ClusterName"}, + {"ClusterName", "Namespace"}, + {"ClusterName", "Namespace", "Service"}, + {"ClusterName", "Namespace", "PodName"}, + {"ClusterName", "Namespace", "PodName", "FullPodName"}, + }, + MetricNameSelectors: []string{ + "pod_efa_rx_bytes", + "pod_efa_tx_bytes", + "pod_efa_rx_dropped", + "pod_efa_rdma_read_bytes", + "pod_efa_rdma_write_bytes", + "pod_efa_rdma_write_recv_bytes", + }, + }, + { + Dimensions: [][]string{ + {"ClusterName"}, + {"ClusterName", "NodeName", "InstanceId"}, + }, + MetricNameSelectors: []string{ + "node_efa_rx_bytes", + "node_efa_tx_bytes", + "node_efa_rx_dropped", + "node_efa_rdma_read_bytes", + "node_efa_rdma_write_bytes", + "node_efa_rdma_write_recv_bytes", + }, + }, + } + } + return metricDeclarations +} diff --git a/translator/translate/otel/exporter/awsemf/translator.go b/translator/translate/otel/exporter/awsemf/translator.go index 5bf0c3cd25..bc130f3d54 100644 --- a/translator/translate/otel/exporter/awsemf/translator.go +++ b/translator/translate/otel/exporter/awsemf/translator.go @@ -16,10 +16,13 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" 
"github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) //go:embed awsemf_default_ecs.yaml @@ -73,22 +76,10 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*awsemfexporter.Config) cfg.MiddlewareID = &agenthealth.LogsID - if common.IsAppSignalsKubernetes() && t.name == common.AppSignals { - isEks, err := common.IsEKS() - if err != nil { - return nil, err - } - - if isEks { - return common.GetYamlFileToYamlConfig(cfg, appSignalsConfigEks) - } - return common.GetYamlFileToYamlConfig(cfg, appSignalsConfigK8s) - } else if t.name == common.AppSignals { - return common.GetYamlFileToYamlConfig(cfg, appSignalsConfigGeneric) - } - var defaultConfig string - if isEcs(c) { + if t.isAppSignals(c) { + defaultConfig = getAppSignalsConfig() + } else if isEcs(c) { defaultConfig = defaultEcsConfig } else if isKubernetes(c) { defaultConfig = defaultKubernetesConfig @@ -123,8 +114,15 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { if credentialsFileKey, ok := agent.Global_Config.Credentials[agent.CredentialsFile_Key]; ok { cfg.AWSSessionSettings.SharedCredentialsFile = []string{fmt.Sprintf("%v", credentialsFileKey)} } + if context.CurrentContext().Mode() == config.ModeOnPrem || context.CurrentContext().Mode() == config.ModeOnPremise { + cfg.AWSSessionSettings.LocalMode = true + } - if isEcs(c) { + if t.isAppSignals(c) { + if err := setAppSignalsFields(c, cfg); err != nil { 
+ return nil, err + } + } else if isEcs(c) { if err := setEcsFields(c, cfg); err != nil { return nil, err } @@ -140,6 +138,35 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { return cfg, nil } +func getAppSignalsConfig() string { + ctx := context.CurrentContext() + + mode := ctx.KubernetesMode() + if mode == "" { + mode = ctx.Mode() + } + if mode == config.ModeEC2 { + if ecsutil.GetECSUtilSingleton().IsECS() { + mode = config.ModeECS + } + } + + switch mode { + case config.ModeEKS: + return appSignalsConfigEks + case config.ModeK8sEC2, config.ModeK8sOnPrem: + return appSignalsConfigK8s + case config.ModeEC2, config.ModeECS: + return appSignalsConfigGeneric + default: + return appSignalsConfigGeneric + } +} + +func (t *translator) isAppSignals(conf *confmap.Conf) bool { + return (t.name == common.AppSignals || t.name == common.AppSignalsFallback) && (conf.IsSet(common.AppSignalsMetrics) || conf.IsSet(common.AppSignalsTraces) || conf.IsSet(common.AppSignalsMetricsFallback) || conf.IsSet(common.AppSignalsTracesFallback)) +} + func isEcs(conf *confmap.Conf) bool { return conf.IsSet(ecsBasePathKey) } @@ -152,6 +179,10 @@ func isPrometheus(conf *confmap.Conf) bool { return conf.IsSet(prometheusBasePathKey) } +func setAppSignalsFields(_ *confmap.Conf, _ *awsemfexporter.Config) error { + return nil +} + func setEcsFields(conf *confmap.Conf, cfg *awsemfexporter.Config) error { setDisableMetricExtraction(ecsBasePathKey, conf, cfg) return nil diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 91e7ebf459..87a8644102 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -17,6 +17,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/util/testutil" legacytranslator 
"github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -75,6 +77,7 @@ func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigEcsDisableMetricExtraction": { @@ -116,6 +119,7 @@ func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigKubernetes": { @@ -182,6 +186,7 @@ func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigKubernetesDisableMetricExtraction": { @@ -250,6 +255,7 @@ func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigKubernetesWithEnableFullPodAndContainerMetrics": { @@ -395,6 +401,116 @@ func TestTranslator(t *testing.T) { Dimensions: [][]string{{"ClusterName", "priority_level"}, {"ClusterName"}}, MetricNameSelectors: []string{"apiserver_flowcontrol_request_concurrency_limit"}, }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "GpuDevice"}}, + MetricNameSelectors: []string{ + "container_gpu_utilization", "container_gpu_memory_utilization", "container_gpu_memory_total", "container_gpu_memory_used", "container_gpu_power_draw", "container_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, 
{"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "GpuDevice"}}, + MetricNameSelectors: []string{ + "pod_gpu_utilization", "pod_gpu_memory_utilization", "pod_gpu_memory_total", "pod_gpu_memory_used", "pod_gpu_power_draw", "pod_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "NodeName", "InstanceId"}, {"ClusterName", "NodeName", "InstanceId", "InstanceType", "GpuDevice"}}, + MetricNameSelectors: []string{ + "node_gpu_utilization", "node_gpu_memory_utilization", "node_gpu_memory_total", "node_gpu_memory_used", "node_gpu_power_draw", "node_gpu_temperature", + }, + }, + { + Dimensions: [][]string{{"ClusterName", "NodeName", "InstanceId"}, {"ClusterName"}}, + MetricNameSelectors: []string{ + "node_gpu_total", "node_gpu_request", "node_gpu_limit", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}}, + MetricNameSelectors: []string{ + "cluster_gpu_request", "cluster_gpu_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "container_neuroncore_utilization", + "container_neuroncore_memory_usage_total", + "container_neuroncore_memory_usage_constants", + "container_neuroncore_memory_usage_model_code", + "container_neuroncore_memory_usage_model_shared_scratchpad", + "container_neuroncore_memory_usage_runtime_memory", + "container_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName", "NeuronDevice"}}, + MetricNameSelectors: []string{ + 
"container_neurondevice_hw_ecc_events_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "pod_neuroncore_utilization", + "pod_neuroncore_memory_usage_total", + "pod_neuroncore_memory_usage_constants", + "pod_neuroncore_memory_usage_model_code", + "pod_neuroncore_memory_usage_model_shared_scratchpad", + "pod_neuroncore_memory_usage_runtime_memory", + "pod_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "NeuronDevice"}}, + MetricNameSelectors: []string{ + "pod_neurondevice_hw_ecc_events_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}, {"ClusterName", "InstanceType", "InstanceId", "NodeName", "NeuronDevice", "NeuronCore"}}, + MetricNameSelectors: []string{ + "node_neuroncore_utilization", + "node_neuroncore_memory_usage_total", + "node_neuroncore_memory_usage_constants", + "node_neuroncore_memory_usage_model_code", + "node_neuroncore_memory_usage_model_shared_scratchpad", + "node_neuroncore_memory_usage_runtime_memory", + "node_neuroncore_memory_usage_tensors", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}}, + MetricNameSelectors: []string{ + "node_neuron_execution_errors_total", + "node_neurondevice_runtime_memory_used_bytes", + "node_neuron_execution_latency", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "InstanceId", "NodeName"}, {"ClusterName", "InstanceId", "NodeName", 
"NeuronDevice"}}, + MetricNameSelectors: []string{ + "node_neurondevice_hw_ecc_events_total", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace", "PodName", "ContainerName"}, {"ClusterName", "Namespace", "PodName", "FullPodName", "ContainerName"}}, + MetricNameSelectors: []string{ + "container_efa_rx_bytes", "container_efa_tx_bytes", "container_efa_rx_dropped", "container_efa_rdma_read_bytes", "container_efa_rdma_write_bytes", "container_efa_rdma_write_recv_bytes", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "Namespace"}, {"ClusterName", "Namespace", "Service"}, {"ClusterName", "Namespace", "PodName"}, {"ClusterName", "Namespace", "PodName", "FullPodName"}}, + MetricNameSelectors: []string{ + "pod_efa_rx_bytes", "pod_efa_tx_bytes", "pod_efa_rx_dropped", "pod_efa_rdma_read_bytes", "pod_efa_rdma_write_bytes", "pod_efa_rdma_write_recv_bytes", + }, + }, + { + Dimensions: [][]string{{"ClusterName"}, {"ClusterName", "NodeName", "InstanceId"}}, + MetricNameSelectors: []string{ + "node_efa_rx_bytes", "node_efa_tx_bytes", "node_efa_rx_dropped", "node_efa_rdma_read_bytes", "node_efa_rdma_write_bytes", "node_efa_rdma_write_recv_bytes", + }, + }, }, "metric_descriptors": []awsemfexporter.MetricDescriptor{ { @@ -498,6 +614,7 @@ func TestTranslator(t *testing.T) { Overwrite: true, }, }, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigPrometheus": { @@ -555,6 +672,7 @@ func TestTranslator(t *testing.T) { Unit: "Milliseconds", }, }, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigPrometheusDisableMetricExtraction": { @@ -588,6 +706,7 @@ func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigPrometheusNoDeclarations": { @@ -630,6 +749,7 @@ func TestTranslator(t *testing.T) { Unit: "Milliseconds", }, }, + "local_mode": false, }, }, "GenerateAwsEmfExporterConfigPrometheusNoEmfProcessor": { @@ -662,6 +782,7 @@ 
func TestTranslator(t *testing.T) { }, }, "metric_descriptors": nilMetricDescriptorsSlice, + "local_mode": false, }, }, } @@ -687,6 +808,7 @@ func TestTranslator(t *testing.T) { assert.Equal(t, testCase.want["resource_to_telemetry_conversion"], gotCfg.ResourceToTelemetrySettings) assert.ElementsMatch(t, testCase.want["metric_declarations"], gotCfg.MetricDeclarations) assert.ElementsMatch(t, testCase.want["metric_descriptors"], gotCfg.MetricDescriptors) + assert.Equal(t, testCase.want["local_mode"], gotCfg.LocalMode) assert.Equal(t, "/ca/bundle", gotCfg.CertificateFilePath) assert.Equal(t, "global_arn", gotCfg.RoleARN) assert.Equal(t, "us-east-1", gotCfg.Region) @@ -698,54 +820,152 @@ func TestTranslator(t *testing.T) { } func TestTranslateAppSignals(t *testing.T) { + t.Setenv(envconfig.AWS_CA_BUNDLE, "/ca/bundle") + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.Role_arn = "global_arn" + t.Setenv(envconfig.IMDS_NUMBER_RETRY, "0") tt := NewTranslatorWithName(common.AppSignals) testCases := map[string]struct { - input map[string]any - want *confmap.Conf - wantErr error - isKubernetes bool - detector func() (common.Detector, error) + input map[string]any + want *confmap.Conf + wantErr error + kubernetesMode string + mode string }{ "WithAppSignalsEnabledEKS": { input: map[string]any{ "logs": map[string]any{ "metrics_collected": map[string]any{ - "app_signals": map[string]any{}, + "application_signals": map[string]any{}, }, }}, - want: testutil.GetConf(t, filepath.Join("appsignals_config_eks.yaml")), - isKubernetes: true, - detector: common.TestEKSDetector, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_eks.yaml"), map[string]any{ + "local_mode": "false", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: config.ModeEKS, + mode: config.ModeEC2, }, "WithAppSignalsEnabledK8s": { input: map[string]any{ "logs": map[string]any{ "metrics_collected": map[string]any{ - 
"app_signals": map[string]any{}, + "application_signals": map[string]any{}, }, }}, - want: testutil.GetConf(t, filepath.Join("appsignals_config_k8s.yaml")), - isKubernetes: true, - detector: common.TestK8sDetector, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_k8s.yaml"), map[string]any{ + "local_mode": "true", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: config.ModeK8sOnPrem, + mode: config.ModeOnPrem, }, "WithAppSignalsEnabledGeneric": { + input: map[string]any{ + "logs": map[string]any{ + "metrics_collected": map[string]any{ + "application_signals": map[string]any{}, + }, + }}, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_generic.yaml"), map[string]any{ + "local_mode": "true", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: "", + mode: config.ModeOnPrem, + }, + "WithAppSignalsEnabledEC2": { + input: map[string]any{ + "logs": map[string]any{ + "metrics_collected": map[string]any{ + "application_signals": map[string]any{}, + }, + }}, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_generic.yaml"), map[string]any{ + "local_mode": "false", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: "", + mode: config.ModeEC2, + }, + "WithAppSignalsFallbackEnabledEKS": { input: map[string]any{ "logs": map[string]any{ "metrics_collected": map[string]any{ "app_signals": map[string]any{}, }, }}, - want: testutil.GetConf(t, filepath.Join("appsignals_config_generic.yaml")), - isKubernetes: false, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_eks.yaml"), map[string]any{ + "local_mode": "false", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: config.ModeEKS, + mode: config.ModeEC2, + }, + 
"WithAppSignalsFallbackEnabledK8s": { + input: map[string]any{ + "logs": map[string]any{ + "metrics_collected": map[string]any{ + "app_signals": map[string]any{}, + }, + }}, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_k8s.yaml"), map[string]any{ + "local_mode": "true", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: config.ModeK8sOnPrem, + mode: config.ModeOnPrem, + }, + "WithAppSignalsFallbackEnabledGeneric": { + input: map[string]any{ + "logs": map[string]any{ + "metrics_collected": map[string]any{ + "app_signals": map[string]any{}, + }, + }}, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_generic.yaml"), map[string]any{ + "local_mode": "true", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: "", + mode: config.ModeOnPrem, + }, + "WithAppSignalsFallbackEnabledEC2": { + input: map[string]any{ + "logs": map[string]any{ + "metrics_collected": map[string]any{ + "app_signals": map[string]any{}, + }, + }}, + want: testutil.GetConfWithOverrides(t, filepath.Join("appsignals_config_generic.yaml"), map[string]any{ + "local_mode": "false", + "region": "us-east-1", + "role_arn": "global_arn", + "certificate_file_path": "/ca/bundle", + }), + kubernetesMode: "", + mode: config.ModeEC2, }, } factory := awsemfexporter.NewFactory() for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - if testCase.isKubernetes { - t.Setenv(common.KubernetesEnvVar, "TEST") - } - common.NewDetector = testCase.detector + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) + context.CurrentContext().SetMode(testCase.mode) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) assert.Equal(t, testCase.wantErr, err) diff --git a/translator/translate/otel/exporter/awsxray/translator.go b/translator/translate/otel/exporter/awsxray/translator.go index 
59e63a5454..3754080f86 100644 --- a/translator/translate/otel/exporter/awsxray/translator.go +++ b/translator/translate/otel/exporter/awsxray/translator.go @@ -15,6 +15,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" @@ -33,16 +35,10 @@ type translator struct { var _ common.Translator[component.Config] = (*translator)(nil) var ( - indexedAttributesEKS = []string{ - "aws.local.service", "aws.local.operation", "aws.remote.service", "aws.remote.operation", - "HostedIn.K8s.Namespace", "K8s.RemoteNamespace", "aws.remote.target", - "HostedIn.Environment", "HostedIn.EKS.Cluster", - } - - indexedAttributesK8s = []string{ - "aws.local.service", "aws.local.operation", "aws.remote.service", "aws.remote.operation", - "HostedIn.K8s.Namespace", "K8s.RemoteNamespace", "aws.remote.target", - "HostedIn.Environment", "HostedIn.K8s.Cluster", + indexedAttributes = []string{ + "aws.local.service", "aws.local.operation", "aws.local.environment", + "aws.remote.service", "aws.remote.operation", "aws.remote.environment", + "aws.remote.resource.identifier", "aws.remote.resource.type", } ) @@ -68,16 +64,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*awsxrayexporter.Config) if isAppSignals(conf) { - isEks, err := common.IsEKS() - if err != nil { - return nil, err - } - - if isEks { - cfg.IndexedAttributes = indexedAttributesEKS - } else { - cfg.IndexedAttributes = indexedAttributesK8s - } + cfg.IndexedAttributes = indexedAttributes } c := confmap.NewFromStringMap(map[string]interface{}{ @@ 
-94,6 +81,9 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.AWSSessionSettings.Endpoint = endpointOverride } cfg.AWSSessionSettings.IMDSRetries = retryer.GetDefaultRetryNumber() + if context.CurrentContext().Mode() == config.ModeOnPrem || context.CurrentContext().Mode() == config.ModeOnPremise { + cfg.AWSSessionSettings.LocalMode = true + } if localMode, ok := common.GetBool(conf, common.ConfigKey(common.TracesKey, common.LocalModeKey)); ok { cfg.AWSSessionSettings.LocalMode = localMode } @@ -140,5 +130,5 @@ func getRegion(conf *confmap.Conf) string { } func isAppSignals(conf *confmap.Conf) bool { - return conf.IsSet(common.AppSignalsTraces) + return conf.IsSet(common.AppSignalsTraces) || conf.IsSet(common.AppSignalsTracesFallback) } diff --git a/translator/translate/otel/exporter/awsxray/translator_test.go b/translator/translate/otel/exporter/awsxray/translator_test.go index e4f8343433..e59f1c5672 100644 --- a/translator/translate/otel/exporter/awsxray/translator_test.go +++ b/translator/translate/otel/exporter/awsxray/translator_test.go @@ -15,6 +15,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/util/testutil" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -26,10 +28,11 @@ func TestTranslator(t *testing.T) { tt := NewTranslator() assert.EqualValues(t, "awsxray", tt.ID().String()) testCases := map[string]struct { - input map[string]any - want *confmap.Conf - wantErr error - detector func() (common.Detector, error) + input map[string]any + want *confmap.Conf + wantErr error + kubernetesMode string + mode string }{ "WithMissingKey": { input: map[string]any{"logs": map[string]any{}}, @@ -37,12 +40,14 @@ func TestTranslator(t *testing.T) { 
ID: tt.ID(), JsonKey: common.TracesKey, }, + mode: config.ModeOnPrem, }, "WithDefault": { input: map[string]any{"traces": map[string]any{}}, want: confmap.NewFromStringMap(map[string]any{ "certificate_file_path": "/ca/bundle", "region": "us-east-1", + "local_mode": "true", "role_arn": "global_arn", "imds_retries": 1, "telemetry": map[string]any{ @@ -51,10 +56,12 @@ func TestTranslator(t *testing.T) { }, "middleware": "agenthealth/traces", }), + mode: config.ModeOnPrem, }, "WithCompleteConfig": { input: testutil.GetJson(t, filepath.Join("testdata", "config.json")), want: testutil.GetConf(t, filepath.Join("testdata", "config.yaml")), + mode: config.ModeOnPrem, }, "WithAppSignalsEnabledEKS": { input: map[string]any{ @@ -67,13 +74,12 @@ func TestTranslator(t *testing.T) { "indexed_attributes": []string{ "aws.local.service", "aws.local.operation", + "aws.local.environment", "aws.remote.service", "aws.remote.operation", - "HostedIn.K8s.Namespace", - "K8s.RemoteNamespace", - "aws.remote.target", - "HostedIn.Environment", - "HostedIn.EKS.Cluster", + "aws.remote.environment", + "aws.remote.resource.identifier", + "aws.remote.resource.type", }, "certificate_file_path": "/ca/bundle", "region": "us-east-1", @@ -85,7 +91,8 @@ func TestTranslator(t *testing.T) { }, "middleware": "agenthealth/traces", }), - detector: common.TestEKSDetector, + kubernetesMode: config.ModeEKS, + mode: config.ModeEC2, }, "WithAppSignalsEnabledK8s": { input: map[string]any{ @@ -98,13 +105,12 @@ func TestTranslator(t *testing.T) { "indexed_attributes": []string{ "aws.local.service", "aws.local.operation", + "aws.local.environment", "aws.remote.service", "aws.remote.operation", - "HostedIn.K8s.Namespace", - "K8s.RemoteNamespace", - "aws.remote.target", - "HostedIn.Environment", - "HostedIn.K8s.Cluster", + "aws.remote.environment", + "aws.remote.resource.identifier", + "aws.remote.resource.type", }, "certificate_file_path": "/ca/bundle", "region": "us-east-1", @@ -116,13 +122,45 @@ func TestTranslator(t 
*testing.T) { }, "middleware": "agenthealth/traces", }), - detector: common.TestK8sDetector, + kubernetesMode: config.ModeK8sEC2, + mode: config.ModeEC2, + }, + "WithAppSignalsEnabledEC2": { + input: map[string]any{ + "traces": map[string]any{ + "traces_collected": map[string]any{ + "app_signals": map[string]any{}, + }, + }}, + want: confmap.NewFromStringMap(map[string]any{ + "indexed_attributes": []string{ + "aws.local.service", + "aws.local.operation", + "aws.local.environment", + "aws.remote.service", + "aws.remote.operation", + "aws.remote.environment", + "aws.remote.resource.identifier", + "aws.remote.resource.type", + }, + "certificate_file_path": "/ca/bundle", + "region": "us-east-1", + "role_arn": "global_arn", + "imds_retries": 1, + "telemetry": map[string]any{ + "enabled": true, + "include_metadata": true, + }, + "middleware": "agenthealth/traces", + }), + mode: config.ModeEC2, }, } factory := awsxrayexporter.NewFactory() for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - common.NewDetector = testCase.detector + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) + context.CurrentContext().SetMode(testCase.mode) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) assert.Equal(t, testCase.wantErr, err) diff --git a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go index 2380114cd5..34c66a5190 100644 --- a/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go +++ b/translator/translate/otel/exporter/otel_aws_cloudwatch_logs/translator.go @@ -16,6 +16,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" 
"github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -56,8 +58,6 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { // Add more else if when otel supports log reading if t.name == common.PipelineNameEmfLogs && t.isEmf(c) { defaultConfig = defaultAwsCloudwatchLogsDefault - } else { - return cfg, nil } if defaultConfig != "" { @@ -94,6 +94,9 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { if credentialsFileKey, ok := agent.Global_Config.Credentials[agent.CredentialsFile_Key]; ok { cfg.AWSSessionSettings.SharedCredentialsFile = []string{fmt.Sprintf("%v", credentialsFileKey)} } + if context.CurrentContext().Mode() == config.ModeOnPrem || context.CurrentContext().Mode() == config.ModeOnPremise { + cfg.AWSSessionSettings.LocalMode = true + } return cfg, nil } diff --git a/translator/translate/otel/extension/agenthealth/translator.go b/translator/translate/otel/extension/agenthealth/translator.go index bfe2fb963f..ef39f9390c 100644 --- a/translator/translate/otel/extension/agenthealth/translator.go +++ b/translator/translate/otel/extension/agenthealth/translator.go @@ -11,6 +11,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + translateagent "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -23,9 +25,9 @@ const ( ) var ( - MetricsID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeMetrics)) - LogsID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeLogs)) - TracesID = component.NewIDWithName(agenthealth.TypeStr, string(component.DataTypeTraces)) + MetricsID = 
component.NewIDWithName(agenthealth.TypeStr, component.DataTypeMetrics.String()) + LogsID = component.NewIDWithName(agenthealth.TypeStr, component.DataTypeLogs.String()) + TracesID = component.NewIDWithName(agenthealth.TypeStr, component.DataTypeTraces.String()) ) type translator struct { @@ -39,7 +41,7 @@ var _ common.Translator[component.Config] = (*translator)(nil) func NewTranslator(name component.DataType, operations []string) common.Translator[component.Config] { return &translator{ - name: string(name), + name: name.String(), operations: operations, factory: agenthealth.NewFactory(), isUsageDataEnabled: envconfig.IsUsageDataEnabled(), @@ -57,6 +59,12 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { if usageData, ok := common.GetBool(conf, common.ConfigKey(common.AgentKey, usageDataKey)); ok { cfg.IsUsageDataEnabled = cfg.IsUsageDataEnabled && usageData } - cfg.Stats = agent.StatsConfig{Operations: t.operations} + cfg.Stats = agent.StatsConfig{ + Operations: t.operations, + UsageFlags: map[agent.Flag]any{ + agent.FlagMode: context.CurrentContext().ShortMode(), + agent.FlagRegionType: translateagent.Global_Config.RegionType, + }, + } return cfg, nil } diff --git a/translator/translate/otel/extension/agenthealth/translator_test.go b/translator/translate/otel/extension/agenthealth/translator_test.go index f0febf174b..989372e04a 100644 --- a/translator/translate/otel/extension/agenthealth/translator_test.go +++ b/translator/translate/otel/extension/agenthealth/translator_test.go @@ -7,14 +7,24 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + translateagent 
"github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" ) func TestTranslate(t *testing.T) { + context.CurrentContext().SetMode(config.ModeEC2) + translateagent.Global_Config.RegionType = config.RegionTypeNotFound operations := []string{OperationPutLogEvents} + usageFlags := map[agent.Flag]any{ + agent.FlagMode: config.ShortModeEC2, + agent.FlagRegionType: config.RegionTypeNotFound, + } testCases := map[string]struct { input map[string]interface{} isEnvUsageData bool @@ -27,6 +37,7 @@ func TestTranslate(t *testing.T) { IsUsageDataEnabled: true, Stats: agent.StatsConfig{ Operations: operations, + UsageFlags: usageFlags, }, }, }, @@ -37,6 +48,7 @@ func TestTranslate(t *testing.T) { IsUsageDataEnabled: false, Stats: agent.StatsConfig{ Operations: operations, + UsageFlags: usageFlags, }, }, }, @@ -47,6 +59,7 @@ func TestTranslate(t *testing.T) { IsUsageDataEnabled: false, Stats: agent.StatsConfig{ Operations: operations, + UsageFlags: usageFlags, }, }, }, @@ -57,13 +70,15 @@ func TestTranslate(t *testing.T) { IsUsageDataEnabled: true, Stats: agent.StatsConfig{ Operations: operations, + UsageFlags: usageFlags, }, }, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - tt := NewTranslator("test", operations).(*translator) + testType, _ := component.NewType("test") + tt := NewTranslator(testType, operations).(*translator) assert.Equal(t, "agenthealth/test", tt.ID().String()) tt.isUsageDataEnabled = testCase.isEnvUsageData conf := confmap.NewFromStringMap(testCase.input) diff --git a/translator/translate/otel/extension/awsproxy/translator.go b/translator/translate/otel/extension/awsproxy/translator.go index 4aeab63887..46e16ea037 100644 --- a/translator/translate/otel/extension/awsproxy/translator.go +++ b/translator/translate/otel/extension/awsproxy/translator.go @@ -4,14 +4,27 @@ package awsproxy import ( + "fmt" + "os" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy" 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) +var ( + endpointOverrideKey = common.ConfigKey(common.TracesKey, common.EndpointOverrideKey) + localModeKey = common.ConfigKey(common.TracesKey, common.LocalModeKey) +) + type translator struct { name string factory extension.Factory @@ -32,6 +45,46 @@ func (t *translator) ID() component.ID { } func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + if conf == nil || !conf.IsSet(common.TracesKey) { + return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: common.TracesKey} + } cfg := t.factory.CreateDefaultConfig().(*awsproxy.Config) + cfg.ProxyConfig.CertificateFilePath = os.Getenv(envconfig.AWS_CA_BUNDLE) + if conf.IsSet(endpointOverrideKey) { + cfg.ProxyConfig.AWSEndpoint, _ = common.GetString(conf, endpointOverrideKey) + } + cfg.ProxyConfig.IMDSRetries = retryer.GetDefaultRetryNumber() + if context.CurrentContext().Mode() == config.ModeOnPrem || context.CurrentContext().Mode() == config.ModeOnPremise { + cfg.ProxyConfig.LocalMode = true + } + if localMode, ok := common.GetBool(conf, localModeKey); ok { + cfg.ProxyConfig.LocalMode = localMode + } + if profileKey, ok := agent.Global_Config.Credentials[agent.Profile_Key]; ok { + cfg.ProxyConfig.Profile = fmt.Sprintf("%v", profileKey) + } + cfg.ProxyConfig.Region = getRegion(conf) + cfg.ProxyConfig.RoleARN = getRoleARN(conf) + if credentialsFileKey, ok := agent.Global_Config.Credentials[agent.CredentialsFile_Key]; ok { + cfg.ProxyConfig.SharedCredentialsFile = []string{fmt.Sprintf("%v", 
credentialsFileKey)} + } return cfg, nil } + +func getRoleARN(conf *confmap.Conf) string { + key := common.ConfigKey(common.TracesKey, common.CredentialsKey, common.RoleARNKey) + roleARN, ok := common.GetString(conf, key) + if !ok { + roleARN = agent.Global_Config.Role_arn + } + return roleARN +} + +func getRegion(conf *confmap.Conf) string { + key := common.ConfigKey(common.TracesKey, common.RegionOverrideKey) + region, ok := common.GetString(conf, key) + if !ok { + region = agent.Global_Config.Region + } + return region +} diff --git a/translator/translate/otel/extension/awsproxy/translator_test.go b/translator/translate/otel/extension/awsproxy/translator_test.go index 5934a92e91..ac2ac73345 100644 --- a/translator/translate/otel/extension/awsproxy/translator_test.go +++ b/translator/translate/otel/extension/awsproxy/translator_test.go @@ -14,13 +14,14 @@ import ( func TestTranslate(t *testing.T) { tt := NewTranslator() - conf := confmap.NewFromStringMap(map[string]interface{}{}) + conf := confmap.NewFromStringMap(map[string]any{"traces": map[string]any{}}) got, err := tt.Translate(conf) if err == nil { require.NotNil(t, got) gotCfg, ok := got.(*awsproxy.Config) require.True(t, ok) - wantCfg := awsproxy.NewFactory().CreateDefaultConfig() + wantCfg := awsproxy.NewFactory().CreateDefaultConfig().(*awsproxy.Config) + wantCfg.ProxyConfig.IMDSRetries = 1 assert.Equal(t, wantCfg, gotCfg) } } diff --git a/translator/translate/otel/pipeline/appsignals/translator.go b/translator/translate/otel/pipeline/applicationsignals/translator.go similarity index 87% rename from translator/translate/otel/pipeline/appsignals/translator.go rename to translator/translate/otel/pipeline/applicationsignals/translator.go index 17efc5fbe2..803afbd002 100644 --- a/translator/translate/otel/pipeline/appsignals/translator.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package appsignals +package applicationsignals import ( "fmt" @@ -14,7 +14,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsxray" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/awsproxy" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsappsignals" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/resourcedetection" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" ) @@ -40,8 +40,8 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators if !ok { return nil, fmt.Errorf("no config key defined for data type: %s", t.dataType) } - if conf == nil || !conf.IsSet(configKey) { - return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey} + if conf == nil || (!conf.IsSet(configKey[0]) && !conf.IsSet(configKey[1])) { + return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey[0]} } translators := &common.ComponentTranslators{ @@ -51,10 +51,8 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Extensions: common.NewTranslatorMap[component.Config](), } - if isEks, _ := common.IsEKS(); isEks { - translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) - } - translators.Processors.Set(awsappsignals.NewTranslator(awsappsignals.WithDataType(t.dataType))) + translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) + translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithDataType(t.dataType))) if t.dataType == component.DataTypeTraces { 
translators.Exporters.Set(awsxray.NewTranslatorWithName(common.AppSignals)) diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go new file mode 100644 index 0000000000..575f726c95 --- /dev/null +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -0,0 +1,229 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package applicationsignals + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" +) + +func TestTranslatorTraces(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator(component.DataTypeTraces) + assert.EqualValues(t, "traces/application_signals", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + detector func() (eksdetector.Detector, error) + isEKSCache func() eksdetector.IsEKSCache + }{ + "WithoutTracesCollectedKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsTraces)}, + }, + "WithAppSignalsEnabledTracesEKS": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/application_signals"}, + 
processors: []string{"resourcedetection", "awsapplicationsignals"}, + exporters: []string{"awsxray/application_signals"}, + extensions: []string{"awsproxy/application_signals", "agenthealth/traces"}, + }, + detector: eksdetector.TestEKSDetector, + isEKSCache: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsEnabledK8s": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/application_signals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, + exporters: []string{"awsxray/application_signals"}, + extensions: []string{"awsproxy/application_signals", "agenthealth/traces"}, + }, + detector: eksdetector.TestK8sDetector, + isEKSCache: eksdetector.TestIsEKSCacheK8s, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Setenv(common.KubernetesEnvVar, "TEST") + eksdetector.NewDetector = testCase.detector + eksdetector.IsEKS = testCase.isEKSCache + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} + +func TestTranslatorMetricsForKubernetes(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator(component.DataTypeMetrics) + assert.EqualValues(t, 
"metrics/application_signals", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + detector func() (eksdetector.Detector, error) + isEKSCache func() eksdetector.IsEKSCache + }{ + "WithoutMetricsCollectedKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsMetrics)}, + }, + "WithAppSignalsEnabledMetrics": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/application_signals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, + exporters: []string{"awsemf/application_signals"}, + extensions: []string{"agenthealth/logs"}, + }, + detector: eksdetector.TestEKSDetector, + isEKSCache: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsEnabledK8s": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/application_signals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, + exporters: []string{"awsemf/application_signals"}, + extensions: []string{"agenthealth/logs"}, + }, + detector: eksdetector.TestK8sDetector, + isEKSCache: eksdetector.TestIsEKSCacheK8s, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Setenv(common.KubernetesEnvVar, "TEST") + eksdetector.NewDetector = testCase.detector + eksdetector.IsEKS = testCase.isEKSCache + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), 
component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} +func TestTranslatorMetricsForEC2(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator(component.DataTypeMetrics) + assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + detector func() (eksdetector.Detector, error) + isEKSCache func() eksdetector.IsEKSCache + }{ + "WithoutMetricsCollectedKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsMetrics)}, + }, + "WithAppSignalsEnabledMetrics": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/application_signals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, + exporters: []string{"awsemf/application_signals"}, + extensions: []string{"agenthealth/logs"}, + }, + detector: eksdetector.TestEKSDetector, + isEKSCache: eksdetector.TestIsEKSCacheEKS, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + ctx := context.CurrentContext() + ctx.SetMode(config.ModeEC2) + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), 
component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} diff --git a/translator/translate/otel/pipeline/appsignals/translator_test.go b/translator/translate/otel/pipeline/appsignals/translator_test.go deleted file mode 100644 index ddd8be627c..0000000000 --- a/translator/translate/otel/pipeline/appsignals/translator_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package appsignals - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap" - - "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" -) - -func TestTranslatorTraces(t *testing.T) { - type want struct { - receivers []string - processors []string - exporters []string - extensions []string - } - tt := NewTranslator(component.DataTypeTraces) - assert.EqualValues(t, "traces/app_signals", tt.ID().String()) - testCases := map[string]struct { - input map[string]interface{} - want *want - wantErr error - detector func() (common.Detector, error) - }{ - "WithoutTracesCollectedKey": { - input: map[string]interface{}{}, - wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsTraces)}, - }, - "WithAppSignalsEnabledTracesEKS": { - input: map[string]interface{}{ - "traces": map[string]interface{}{ - "traces_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, - }, - }, - }, - want: &want{ - receivers: []string{"otlp/app_signals"}, - 
processors: []string{"resourcedetection", "awsappsignals"}, - exporters: []string{"awsxray/app_signals"}, - extensions: []string{"awsproxy/app_signals", "agenthealth/traces"}, - }, - detector: common.TestEKSDetector, - }, - "WithAppSignalsEnabledK8s": { - input: map[string]interface{}{ - "traces": map[string]interface{}{ - "traces_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, - }, - }, - }, - want: &want{ - receivers: []string{"otlp/app_signals"}, - processors: []string{"awsappsignals"}, - exporters: []string{"awsxray/app_signals"}, - extensions: []string{"awsproxy/app_signals", "agenthealth/traces"}, - }, - detector: common.TestK8sDetector, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - common.NewDetector = testCase.detector - conf := confmap.NewFromStringMap(testCase.input) - got, err := tt.Translate(conf) - assert.Equal(t, testCase.wantErr, err) - if testCase.want == nil { - assert.Nil(t, got) - } else { - require.NotNil(t, got) - assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) - } - }) - } -} - -func TestTranslatorMetrics(t *testing.T) { - type want struct { - receivers []string - processors []string - exporters []string - extensions []string - } - tt := NewTranslator(component.DataTypeMetrics) - assert.EqualValues(t, "metrics/app_signals", tt.ID().String()) - testCases := map[string]struct { - input map[string]interface{} - want *want - wantErr error - detector func() (common.Detector, error) - }{ - "WithoutMetricsCollectedKey": { - input: map[string]interface{}{}, - wantErr: &common.MissingKeyError{ID: 
tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsMetrics)}, - }, - "WithAppSignalsEnabledMetrics": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - "metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, - }, - }, - }, - want: &want{ - receivers: []string{"otlp/app_signals"}, - processors: []string{"resourcedetection", "awsappsignals"}, - exporters: []string{"awsemf/app_signals"}, - extensions: []string{"agenthealth/logs"}, - }, - detector: common.TestEKSDetector, - }, - "WithAppSignalsEnabledK8s": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - "metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, - }, - }, - }, - want: &want{ - receivers: []string{"otlp/app_signals"}, - processors: []string{"awsappsignals"}, - exporters: []string{"awsemf/app_signals"}, - extensions: []string{"agenthealth/logs"}, - }, - detector: common.TestK8sDetector, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - common.NewDetector = testCase.detector - conf := confmap.NewFromStringMap(testCase.input) - got, err := tt.Translate(conf) - assert.Equal(t, testCase.wantErr, err) - if testCase.want == nil { - assert.Nil(t, got) - } else { - require.NotNil(t, got) - assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) - assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) - } - }) - } -} diff --git a/translator/translate/otel/pipeline/containerinsights/translator.go b/translator/translate/otel/pipeline/containerinsights/translator.go index ba5fb093e2..4e5169ffac 100644 --- 
a/translator/translate/otel/pipeline/containerinsights/translator.go +++ b/translator/translate/otel/pipeline/containerinsights/translator.go @@ -13,6 +13,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/gpu" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricstransformprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" ) @@ -50,9 +51,15 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators // Append the metricstransformprocessor only if enhanced container insights is enabled enhancedContainerInsightsEnabled := awscontainerinsight.EnhancedContainerInsightsEnabled(conf) if enhancedContainerInsightsEnabled { + processors := common.NewTranslatorMap(metricstransformprocessor.NewTranslatorWithName(pipelineName)) + acceleratedComputeMetricsEnabled := awscontainerinsight.AcceleratedComputeMetricsEnabled(conf) + if acceleratedComputeMetricsEnabled { + processors.Set(gpu.NewTranslatorWithName(pipelineName)) + } + processors.Set(batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey)) return &common.ComponentTranslators{ Receivers: common.NewTranslatorMap(awscontainerinsight.NewTranslator()), - Processors: common.NewTranslatorMap(metricstransformprocessor.NewTranslatorWithName(pipelineName), batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey)), // EKS & ECS CI sit under metrics_collected in "logs" + Processors: processors, // EKS & ECS CI sit under metrics_collected in "logs" Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), Extensions: 
common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil diff --git a/translator/translate/otel/pipeline/containerinsights/translator_test.go b/translator/translate/otel/pipeline/containerinsights/translator_test.go index 14a721e6b2..d3a4bb67a1 100644 --- a/translator/translate/otel/pipeline/containerinsights/translator_test.go +++ b/translator/translate/otel/pipeline/containerinsights/translator_test.go @@ -81,7 +81,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineType: "metrics/containerinsights", receivers: []string{"awscontainerinsightreceiver"}, - processors: []string{"metricstransform/containerinsights", "batch/containerinsights"}, + processors: []string{"metricstransform/containerinsights", "gpuattributes/containerinsights", "batch/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, extensions: []string{"agenthealth/logs"}, }, diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 4b126341fe..bb82a82bf4 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -15,6 +15,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/cumulativetodeltaprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/ec2taggerprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricsdecorator" + otlpReceiver "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" ) type translator struct { @@ -42,7 +43,23 @@ func (t translator) ID() component.ID { func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { if conf == nil || !conf.IsSet(common.MetricsKey) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: common.MetricsKey} - } else if t.receivers.Len() == 0 { + } + + 
hostReceivers := t.receivers + if common.PipelineNameHost == t.name { + switch v := conf.Get(common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey)).(type) { + case []interface{}: + for index, _ := range v { + hostReceivers.Set(otlpReceiver.NewTranslator( + otlpReceiver.WithDataType(component.DataTypeMetrics), + otlpReceiver.WithInstanceNum(index))) + } + case map[string]interface{}: + hostReceivers.Set(otlpReceiver.NewTranslator(otlpReceiver.WithDataType(component.DataTypeMetrics))) + } + } + + if hostReceivers.Len() == 0 { log.Printf("D! pipeline %s has no receivers", t.name) return nil, nil } diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index b91400e53f..f41a3e1d83 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -128,10 +128,12 @@ func TestTranslator(t *testing.T) { }, } for name, testCase := range testCases { + nopType, _ := component.NewType("nop") + otherType, _ := component.NewType("other") t.Run(name, func(t *testing.T) { ht := NewTranslator(testCase.pipelineName, common.NewTranslatorMap[component.Config]( - &testTranslator{id: component.NewID("nop")}, - &testTranslator{id: component.NewID("other")}, + &testTranslator{id: component.NewID(nopType)}, + &testTranslator{id: component.NewID(otherType)}, )) conf := confmap.NewFromStringMap(testCase.input) got, err := ht.Translate(conf) diff --git a/translator/translate/otel/pipeline/translator.go b/translator/translate/otel/pipeline/translator.go index 4102808a80..b7e18b5b8c 100644 --- a/translator/translate/otel/pipeline/translator.go +++ b/translator/translate/otel/pipeline/translator.go @@ -36,7 +36,8 @@ func NewTranslator(translators common.TranslatorMap[*common.ComponentTranslators } func (t *translator) ID() component.ID { - return component.NewID("") + newType, _ := component.NewType("") + return 
component.NewID(newType) } // Translate creates the pipeline configuration. diff --git a/translator/translate/otel/pipeline/translator_test.go b/translator/translate/otel/pipeline/translator_test.go index 5a76598877..afaf0fe1b0 100644 --- a/translator/translate/otel/pipeline/translator_test.go +++ b/translator/translate/otel/pipeline/translator_test.go @@ -24,7 +24,8 @@ func (t testTranslator) Translate(_ *confmap.Conf) (*common.ComponentTranslators } func (t testTranslator) ID() component.ID { - return component.NewID("") + newType, _ := component.NewType("") + return component.NewID(newType) } func TestTranslator(t *testing.T) { diff --git a/translator/translate/otel/pipeline/xray/translator_test.go b/translator/translate/otel/pipeline/xray/translator_test.go index 450db8ff54..888c7c1775 100644 --- a/translator/translate/otel/pipeline/xray/translator_test.go +++ b/translator/translate/otel/pipeline/xray/translator_test.go @@ -58,7 +58,7 @@ func TestTranslator(t *testing.T) { }, }, want: &want{ - receivers: []string{"otlp"}, + receivers: []string{"otlp/traces"}, processors: []string{"batch/xray"}, exporters: []string{"awsxray"}, extensions: []string{"agenthealth/traces"}, @@ -74,7 +74,7 @@ func TestTranslator(t *testing.T) { }, }, want: &want{ - receivers: []string{"awsxray", "otlp"}, + receivers: []string{"awsxray", "otlp/traces"}, processors: []string{"batch/xray"}, exporters: []string{"awsxray"}, extensions: []string{"agenthealth/traces"}, diff --git a/translator/translate/otel/processor/awsapplicationsignals/testdata/config_ec2.yaml b/translator/translate/otel/processor/awsapplicationsignals/testdata/config_ec2.yaml new file mode 100644 index 0000000000..57330b3c0e --- /dev/null +++ b/translator/translate/otel/processor/awsapplicationsignals/testdata/config_ec2.yaml @@ -0,0 +1,3 @@ +resolvers: + - platform: ec2 + name: test \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml 
b/translator/translate/otel/processor/awsapplicationsignals/testdata/config_eks.yaml similarity index 100% rename from translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml rename to translator/translate/otel/processor/awsapplicationsignals/testdata/config_eks.yaml diff --git a/translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml b/translator/translate/otel/processor/awsapplicationsignals/testdata/config_generic.yaml similarity index 100% rename from translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml rename to translator/translate/otel/processor/awsapplicationsignals/testdata/config_generic.yaml diff --git a/translator/translate/otel/processor/awsappsignals/testdata/config_k8s.yaml b/translator/translate/otel/processor/awsapplicationsignals/testdata/config_k8s.yaml similarity index 100% rename from translator/translate/otel/processor/awsappsignals/testdata/config_k8s.yaml rename to translator/translate/otel/processor/awsapplicationsignals/testdata/config_k8s.yaml diff --git a/translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json b/translator/translate/otel/processor/awsapplicationsignals/testdata/invalidRulesConfig.json similarity index 67% rename from translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json rename to translator/translate/otel/processor/awsapplicationsignals/testdata/invalidRulesConfig.json index e1f8d8cec5..e06d487fc9 100644 --- a/translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json +++ b/translator/translate/otel/processor/awsapplicationsignals/testdata/invalidRulesConfig.json @@ -3,6 +3,12 @@ "metrics_collected": { "app_signals": { "hosted_in": "test", + "limiter": { + "drop_threshold": 20, + "log_dropped_metrics": true, + "rotation_interval": "10m", + "garbage_collection_interval": "10m" + }, "rules": [ { "selectors": [ diff --git 
a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfig.json similarity index 88% rename from translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json rename to translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfig.json index caaae63bc8..97b8da0a2c 100644 --- a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json +++ b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfig.json @@ -3,6 +3,12 @@ "metrics_collected": { "app_signals": { "hosted_in": "test", + "limiter": { + "drop_threshold": 20, + "log_dropped_metrics": true, + "rotation_interval": "10m", + "garbage_collection_interval": "10m" + }, "rules": [ { "selectors": [ diff --git a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigEKS.yaml similarity index 82% rename from translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml rename to translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigEKS.yaml index 4508e78ffb..9ff0eb0503 100644 --- a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml +++ b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigEKS.yaml @@ -1,6 +1,12 @@ resolvers: - platform: eks name: test +limiter: + disabled: false + drop_threshold: 20 + log_dropped_metrics: true + rotation_interval: 10m + garbage_collection_interval: 10m rules: - selectors: - dimension: Operation diff --git a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigGeneric.yaml similarity index 83% rename from 
translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml rename to translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigGeneric.yaml index fb570a879c..5c494daa0b 100644 --- a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml +++ b/translator/translate/otel/processor/awsapplicationsignals/testdata/validRulesConfigGeneric.yaml @@ -1,6 +1,12 @@ resolvers: - platform: generic name: test +limiter: + disabled: false + drop_threshold: 20 + log_dropped_metrics: true + rotation_interval: 10m + garbage_collection_interval: 10m rules: - selectors: - dimension: Operation diff --git a/translator/translate/otel/processor/awsappsignals/translator.go b/translator/translate/otel/processor/awsapplicationsignals/translator.go similarity index 50% rename from translator/translate/otel/processor/awsappsignals/translator.go rename to translator/translate/otel/processor/awsapplicationsignals/translator.go index 5f5fd2d9df..49c669efc9 100644 --- a/translator/translate/otel/processor/awsappsignals/translator.go +++ b/translator/translate/otel/processor/awsapplicationsignals/translator.go @@ -1,21 +1,25 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package awsappsignals +package awsapplicationsignals import ( _ "embed" "errors" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/processor" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" - appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" + appsignalsconfig "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/rules" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/util" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) type translator struct { @@ -45,7 +49,7 @@ func WithDataType(dataType component.DataType) Option { var _ common.Translator[component.Config] = (*translator)(nil) func NewTranslator(opts ...Option) common.Translator[component.Config] { - t := &translator{factory: awsappsignals.NewFactory()} + t := &translator{factory: awsapplicationsignals.NewFactory()} for _, opt := range opts { opt.apply(t) } @@ -62,38 +66,122 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { hostedInConfigKey := common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.AppSignals, "hosted_in") hostedIn, hostedInConfigured := common.GetString(conf, hostedInConfigKey) + if !hostedInConfigured { + hostedInConfigKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.AppSignalsFallback, "hosted_in") + hostedIn, hostedInConfigured = 
common.GetString(conf, hostedInConfigKey) + } if common.IsAppSignalsKubernetes() { if !hostedInConfigured { hostedIn = util.GetClusterNameFromEc2Tagger() } + } - isEks, err := common.IsEKS() - if err != nil { - return nil, err + mode := context.CurrentContext().KubernetesMode() + if mode == "" { + mode = context.CurrentContext().Mode() + } + if mode == config.ModeEC2 { + if ecsutil.GetECSUtilSingleton().IsECS() { + mode = config.ModeECS } - - if isEks { - cfg.Resolvers = []appsignalsconfig.Resolver{ - appsignalsconfig.NewEKSResolver(hostedIn), - } - } else { - cfg.Resolvers = []appsignalsconfig.Resolver{ - appsignalsconfig.NewK8sResolver(hostedIn), - } + } + switch mode { + case config.ModeEKS: + cfg.Resolvers = []appsignalsconfig.Resolver{ + appsignalsconfig.NewEKSResolver(hostedIn), } - - } else { + case config.ModeK8sEC2, config.ModeK8sOnPrem: + cfg.Resolvers = []appsignalsconfig.Resolver{ + appsignalsconfig.NewK8sResolver(hostedIn), + } + case config.ModeEC2: + cfg.Resolvers = []appsignalsconfig.Resolver{ + appsignalsconfig.NewEC2Resolver(hostedIn), + } + case config.ModeECS: + cfg.Resolvers = []appsignalsconfig.Resolver{ + appsignalsconfig.NewGenericResolver(hostedIn), + } + default: cfg.Resolvers = []appsignalsconfig.Resolver{ appsignalsconfig.NewGenericResolver(hostedIn), } } + limiterConfig, _ := t.translateMetricLimiterConfig(conf, configKey) + cfg.Limiter = limiterConfig + return t.translateCustomRules(conf, configKey, cfg) } -func (t *translator) translateCustomRules(conf *confmap.Conf, configKey string, cfg *appsignalsconfig.Config) (component.Config, error) { +func (t *translator) translateMetricLimiterConfig(conf *confmap.Conf, configKey []string) (*appsignalsconfig.LimiterConfig, error) { + limiterConfigKey := common.ConfigKey(configKey[0], "limiter") + if !conf.IsSet(limiterConfigKey) { + limiterConfigKey = common.ConfigKey(configKey[1], "limiter") + if !conf.IsSet(limiterConfigKey) { + return nil, nil + } + } + + configJson, ok := 
conf.Get(limiterConfigKey).(map[string]interface{}) + if !ok { + return nil, errors.New("type conversion error: limiter is not an object") + } + + limiterConfig := appsignalsconfig.NewDefaultLimiterConfig() + if rawVal, exists := configJson["drop_threshold"]; exists { + if val, ok := rawVal.(float64); !ok { + return nil, errors.New("type conversion error: drop_threshold is not a number") + } else { + limiterConfig.Threshold = int(val) + } + } + if rawVal, exists := configJson["disabled"]; exists { + if val, ok := rawVal.(bool); !ok { + return nil, errors.New("type conversion error: disabled is not a boolean") + } else { + limiterConfig.Disabled = val + } + } + if rawVal, exists := configJson["log_dropped_metrics"]; exists { + if val, ok := rawVal.(bool); !ok { + return nil, errors.New("type conversion error: log_dropped_metrics is not a boolean") + } else { + limiterConfig.LogDroppedMetrics = val + } + } + if rawVal, exists := configJson["garbage_collection_interval"]; exists { + if val, ok := rawVal.(string); !ok { + return nil, errors.New("type conversion error: garbage_collection_interval is not a string") + } else { + if interval, err := time.ParseDuration(val); err != nil { + return nil, errors.New("type conversion error: garbage_collection_interval is not a time string") + } else { + limiterConfig.GarbageCollectionInterval = interval + } + } + } + if rawVal, exists := configJson["rotation_interval"]; exists { + if val, ok := rawVal.(string); !ok { + return nil, errors.New("type conversion error: rotation_interval is not a string") + } else { + if interval, err := time.ParseDuration(val); err != nil { + return nil, errors.New("type conversion error: rotation_interval is not a time string") + } else { + limiterConfig.RotationInterval = interval + } + } + } + return limiterConfig, nil + +} + +func (t *translator) translateCustomRules(conf *confmap.Conf, configKey []string, cfg *appsignalsconfig.Config) (component.Config, error) { var rulesList []rules.Rule - 
rulesConfigKey := common.ConfigKey(configKey, common.AppSignalsRules) + rulesConfigKey := common.ConfigKey(configKey[0], common.AppSignalsRules) + if !conf.IsSet(rulesConfigKey) { + rulesConfigKey = common.ConfigKey(configKey[1], common.AppSignalsRules) + } if conf.IsSet(rulesConfigKey) { for _, rule := range conf.Get(rulesConfigKey).([]interface{}) { ruleConfig := rules.Rule{} diff --git a/translator/translate/otel/processor/awsapplicationsignals/translator_test.go b/translator/translate/otel/processor/awsapplicationsignals/translator_test.go new file mode 100644 index 0000000000..9f222d0116 --- /dev/null +++ b/translator/translate/otel/processor/awsapplicationsignals/translator_test.go @@ -0,0 +1,202 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsapplicationsignals + +import ( + _ "embed" + "encoding/json" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" + translatorConfig "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +var ( + //go:embed testdata/config_eks.yaml + validAppSignalsYamlEKS string + //go:embed testdata/config_k8s.yaml + validAppSignalsYamlK8s string + //go:embed testdata/config_ec2.yaml + validAppSignalsYamlEC2 string + //go:embed testdata/config_generic.yaml + validAppSignalsYamlGeneric string + //go:embed testdata/validRulesConfig.json + validAppSignalsRulesConfig string + //go:embed testdata/validRulesConfigEKS.yaml + validAppSignalsRulesYamlEKS string + //go:embed testdata/validRulesConfigGeneric.yaml + 
validAppSignalsRulesYamlGeneric string + //go:embed testdata/invalidRulesConfig.json + invalidAppSignalsRulesConfig string +) + +func TestTranslate(t *testing.T) { + var validJsonMap, invalidJsonMap map[string]interface{} + json.Unmarshal([]byte(validAppSignalsRulesConfig), &validJsonMap) + json.Unmarshal([]byte(invalidAppSignalsRulesConfig), &invalidJsonMap) + + tt := NewTranslator(WithDataType(component.DataTypeMetrics)) + testCases := map[string]struct { + input map[string]interface{} + want string + wantErr error + isKubernetes bool + kubernetesMode string + mode string + }{ + //The config for the awsapplicationsignals processor is https://code.amazon.com/packages/AWSTracingSamplePetClinic/blobs/97ce3c409986ac8ae014de1e3fe71fdb98080f22/--/eks/appsignals/auto-instrumentation-new.yaml#L20 + //The awsapplicationsignals processor config does not have a platform field, instead it gets added to resolvers when marshalled + "WithAppSignalsEnabledEKS": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlEKS, + isKubernetes: true, + kubernetesMode: translatorConfig.ModeEKS, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsCustomRulesEnabledEKS": { + input: validJsonMap, + want: validAppSignalsRulesYamlEKS, + isKubernetes: true, + kubernetesMode: translatorConfig.ModeEKS, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsEnabledK8S": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlK8s, + isKubernetes: true, + kubernetesMode: translatorConfig.ModeK8sEC2, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsEnabledGeneric": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + 
"metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + }, + }}, + want: validAppSignalsYamlGeneric, + isKubernetes: false, + mode: translatorConfig.ModeOnPrem, + }, + "WithAppSignalsCustomRulesEnabledGeneric": { + input: validJsonMap, + want: validAppSignalsRulesYamlGeneric, + isKubernetes: false, + mode: translatorConfig.ModeOnPrem, + }, + "WithInvalidAppSignalsCustomRulesEnabled": { + input: invalidJsonMap, + wantErr: errors.New("replace action set, but no replacements defined for service rule"), + mode: translatorConfig.ModeOnPrem, + }, + "WithAppSignalsEnabledEC2": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlEC2, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsFallbackEnabledK8S": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlK8s, + isKubernetes: true, + kubernetesMode: translatorConfig.ModeK8sEC2, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsFallbackEnabledGeneric": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: validAppSignalsYamlGeneric, + isKubernetes: false, + mode: translatorConfig.ModeOnPrem, + }, + "WithAppSignalsFallbackEnabledEKS": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlEKS, + isKubernetes: true, + kubernetesMode: translatorConfig.ModeEKS, + mode: translatorConfig.ModeEC2, + }, + "WithAppSignalsFallbackEnabledEC2": { + 
input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: validAppSignalsYamlEC2, + mode: translatorConfig.ModeEC2, + }, + } + factory := awsapplicationsignals.NewFactory() + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.isKubernetes { + t.Setenv(common.KubernetesEnvVar, "TEST") + } + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) + context.CurrentContext().SetMode(testCase.mode) + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*config.Config) + require.True(t, ok) + wantCfg := factory.CreateDefaultConfig() + yamlConfig, err := common.GetYamlFileToYamlConfig(wantCfg, testCase.want) + require.NoError(t, err) + assert.Equal(t, yamlConfig.(*config.Config), gotCfg) + } + }) + } +} diff --git a/translator/translate/otel/processor/awsappsignals/translator_test.go b/translator/translate/otel/processor/awsappsignals/translator_test.go deleted file mode 100644 index b8d8da4be1..0000000000 --- a/translator/translate/otel/processor/awsappsignals/translator_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package awsappsignals - -import ( - _ "embed" - "encoding/json" - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap" - - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/config" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" -) - -var ( - //go:embed testdata/config_eks.yaml - validAppSignalsYamlEKS string - //go:embed testdata/config_k8s.yaml - validAppSignalsYamlK8s string - - //go:embed testdata/config_generic.yaml - validAppSignalsYamlGeneric string - //go:embed testdata/validRulesConfig.json - validAppSignalsRulesConfig string - //go:embed testdata/validRulesConfigEKS.yaml - validAppSignalsRulesYamlEKS string - //go:embed testdata/validRulesConfigGeneric.yaml - validAppSignalsRulesYamlGeneric string - //go:embed testdata/invalidRulesConfig.json - invalidAppSignalsRulesConfig string -) - -func TestTranslate(t *testing.T) { - var validJsonMap, invalidJsonMap map[string]interface{} - json.Unmarshal([]byte(validAppSignalsRulesConfig), &validJsonMap) - json.Unmarshal([]byte(invalidAppSignalsRulesConfig), &invalidJsonMap) - - tt := NewTranslator(WithDataType(component.DataTypeMetrics)) - testCases := map[string]struct { - input map[string]interface{} - want string - wantErr error - isKubernetes bool - detector func() (common.Detector, error) - }{ - //The config for the awsappsignals processor is https://code.amazon.com/packages/AWSTracingSamplePetClinic/blobs/97ce3c409986ac8ae014de1e3fe71fdb98080f22/--/eks/appsignals/auto-instrumentation-new.yaml#L20 - //The awsappsignals processor config does not have a platform field, instead it gets added to resolvers when marshalled - "WithAppSignalsEnabledEKS": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - 
"metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{ - "hosted_in": "test", - }, - }, - }}, - want: validAppSignalsYamlEKS, - isKubernetes: true, - detector: common.TestEKSDetector, - }, - "WithAppSignalsCustomRulesEnabledEKS": { - input: validJsonMap, - want: validAppSignalsRulesYamlEKS, - isKubernetes: true, - detector: common.TestEKSDetector, - }, - "WithAppSignalsEnabledK8S": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - "metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{ - "hosted_in": "test", - }, - }, - }}, - want: validAppSignalsYamlK8s, - isKubernetes: true, - detector: common.TestK8sDetector, - }, - "WithAppSignalsEnabledGeneric": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - "metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, - }, - }}, - want: validAppSignalsYamlGeneric, - isKubernetes: false, - }, - "WithAppSignalsCustomRulesEnabledGeneric": { - input: validJsonMap, - want: validAppSignalsRulesYamlGeneric, - isKubernetes: false, - }, - "WithInvalidAppSignalsCustomRulesEnabled": { - input: invalidJsonMap, - wantErr: errors.New("replace action set, but no replacements defined for service rule"), - }, - } - factory := awsappsignals.NewFactory() - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - if testCase.isKubernetes { - t.Setenv(common.KubernetesEnvVar, "TEST") - } - common.NewDetector = testCase.detector - conf := confmap.NewFromStringMap(testCase.input) - got, err := tt.Translate(conf) - assert.Equal(t, testCase.wantErr, err) - if err == nil { - require.NotNil(t, got) - gotCfg, ok := got.(*config.Config) - require.True(t, ok) - wantCfg := factory.CreateDefaultConfig() - yamlConfig, err := common.GetYamlFileToYamlConfig(wantCfg, testCase.want) - require.NoError(t, err) - assert.Equal(t, yamlConfig.(*config.Config), gotCfg) - } - }) - } -} diff --git 
a/translator/translate/otel/processor/ec2taggerprocessor/translator.go b/translator/translate/otel/processor/ec2taggerprocessor/translator.go index c8e2dfdd6a..46dd00ef85 100644 --- a/translator/translate/otel/processor/ec2taggerprocessor/translator.go +++ b/translator/translate/otel/processor/ec2taggerprocessor/translator.go @@ -52,7 +52,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { credentials := confmap.NewFromStringMap(agent.Global_Config.Credentials) _ = credentials.Unmarshal(cfg) for k, v := range ec2tagger.SupportedAppendDimensions { - value, ok := common.GetString(conf, common.ConfigKey(common.MetricsKey, AppendDimensionsKey, k)) + value, ok := common.GetString(conf, common.ConfigKey(ec2taggerKey, k)) if ok && v == value { if k == "AutoScalingGroupName" { cfg.EC2InstanceTagKeys = append(cfg.EC2InstanceTagKeys, k) @@ -61,7 +61,13 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } } } - cfg.RefreshIntervalSeconds = 0 * time.Second + + if value, ok := common.GetString(conf, common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.DiskKey, AppendDimensionsKey, ec2tagger.AttributeVolumeId)); ok && value == ec2tagger.ValueAppendDimensionVolumeId { + cfg.EBSDeviceKeys = []string{"*"} + cfg.DiskDeviceTagKey = "device" + } + + cfg.RefreshIntervalSeconds = time.Duration(0) cfg.IMDSRetries = retryer.GetDefaultRetryNumber() return cfg, nil diff --git a/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go b/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go index 5b01985135..f863da8e3d 100644 --- a/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go +++ b/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go @@ -46,6 +46,32 @@ func TestTranslator(t *testing.T) { EC2InstanceTagKeys: []string{"AutoScalingGroupName"}, }, }, + "WithDiskAppendDimensions": { + input: map[string]interface{}{ + "metrics": 
map[string]interface{}{ + "append_dimensions": map[string]interface{}{ + "AutoScalingGroupName": "${aws:AutoScalingGroupName}", + "ImageId": "${aws:ImageId}", + "InstanceId": "${aws:InstanceId}", + "InstanceType": "${aws:InstanceType}", + }, + "metrics_collected": map[string]interface{}{ + "disk": map[string]interface{}{ + "append_dimensions": map[string]interface{}{ + "VolumeId": "${aws:VolumeId}", + }, + }, + }, + }, + }, + want: &ec2tagger.Config{ + RefreshIntervalSeconds: 0 * time.Second, + EC2MetadataTags: []string{"ImageId", "InstanceId", "InstanceType"}, + EC2InstanceTagKeys: []string{"AutoScalingGroupName"}, + DiskDeviceTagKey: "device", + EBSDeviceKeys: []string{"*"}, + }, + }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { @@ -60,6 +86,8 @@ func TestTranslator(t *testing.T) { sort.Strings(gotCfg.EC2MetadataTags) require.Equal(t, tc.want.EC2MetadataTags, gotCfg.EC2MetadataTags) require.Equal(t, tc.want.EC2InstanceTagKeys, gotCfg.EC2InstanceTagKeys) + require.Equal(t, tc.want.DiskDeviceTagKey, gotCfg.DiskDeviceTagKey) + require.Equal(t, tc.want.EBSDeviceKeys, gotCfg.EBSDeviceKeys) } }) } diff --git a/translator/translate/otel/processor/gpu/translator.go b/translator/translate/otel/processor/gpu/translator.go new file mode 100644 index 0000000000..ab7538ee0f --- /dev/null +++ b/translator/translate/otel/processor/gpu/translator.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package gpu + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/processor" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/gpuattributes" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory processor.Factory +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslatorWithName(name string) common.Translator[component.Config] { + return &translator{name, gpuattributes.NewFactory()} +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*gpuattributes.Config) + return cfg, nil +} diff --git a/translator/translate/otel/processor/metricstransformprocessor/translator.go b/translator/translate/otel/processor/metricstransformprocessor/translator.go index 29979b6965..d50f5ded1e 100644 --- a/translator/translate/otel/processor/metricstransformprocessor/translator.go +++ b/translator/translate/otel/processor/metricstransformprocessor/translator.go @@ -11,9 +11,44 @@ import ( "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/processor" + "github.com/aws/amazon-cloudwatch-agent/internal/containerinsightscommon" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" ) +const gpuLogSuffix = "GPU" + +var metricDuplicateTypes = []string{ + containerinsightscommon.TypeGpuContainer, + containerinsightscommon.TypeGpuPod, + containerinsightscommon.TypeGpuNode, +} + +var renameMapForDcgm = map[string]string{ + "DCGM_FI_DEV_GPU_UTIL": containerinsightscommon.GpuUtilization, + "DCGM_FI_DEV_FB_USED_PERCENT": 
containerinsightscommon.GpuMemUtilization, + "DCGM_FI_DEV_FB_USED": containerinsightscommon.GpuMemUsed, + "DCGM_FI_DEV_FB_TOTAL": containerinsightscommon.GpuMemTotal, + "DCGM_FI_DEV_GPU_TEMP": containerinsightscommon.GpuTemperature, + "DCGM_FI_DEV_POWER_USAGE": containerinsightscommon.GpuPowerDraw, +} + +var renameMapForNeuronMonitor = map[string]string{ + "execution_errors_total": containerinsightscommon.NeuronExecutionErrors, + "execution_status_total": containerinsightscommon.NeuronExecutionStatus, + "neuron_runtime_memory_used_bytes": containerinsightscommon.NeuronRuntimeMemoryUsage, + "neuroncore_memory_usage_constants": containerinsightscommon.NeuronCoreMemoryUtilizationConstants, + "neuroncore_memory_usage_model_code": containerinsightscommon.NeuronCoreMemoryUtilizationModelCode, + "neuroncore_memory_usage_model_shared_scratchpad": containerinsightscommon.NeuronCoreMemoryUtilizationSharedScratchpad, + "neuroncore_memory_usage_runtime_memory": containerinsightscommon.NeuronCoreMemoryUtilizationRuntimeMemory, + "neuroncore_memory_usage_tensors": containerinsightscommon.NeuronCoreMemoryUtilizationTensors, + "neuroncore_utilization_ratio": containerinsightscommon.NeuronCoreUtilization, + "instance_info": containerinsightscommon.NeuronInstanceInfo, + "neuron_hardware": containerinsightscommon.NeuronHardware, + "hardware_ecc_events_total": containerinsightscommon.NeuronDeviceHardwareEccEvents, + "execution_latency_seconds": containerinsightscommon.NeuronExecutionLatency, +} + type translator struct { name string factory processor.Factory @@ -31,14 +66,82 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*metricstransformprocessor.Config) - c := confmap.NewFromStringMap(map[string]interface{}{ - "transforms": map[string]interface{}{ + transformRules := []map[string]interface{}{ + { "include": "apiserver_request_total", "match_type": "regexp", 
"experimental_match_labels": map[string]string{"code": "^5.*"}, "action": "insert", "new_name": "apiserver_request_total_5xx", }, + } + + if awscontainerinsight.AcceleratedComputeMetricsEnabled(conf) { + // appends DCGM metric transform rules for each metric type (container/pod/node) with following format: + // { + // "include": "DCGM_FI_DEV_GPU_UTIL", + // "action": "insert", + // "new_name": "container_gpu_utilization", + // "operations": [ + // { + // "action": "add_label", + // "new_label": "Type", + // "new_value": "ContainerGPU", + // }, + // ... + // ] + // }, + for old, new := range renameMapForDcgm { + var operations []map[string]interface{} + // convert decimals to percent + if new == containerinsightscommon.GpuMemUtilization { + operations = append(operations, map[string]interface{}{ + "action": "experimental_scale_value", + "experimental_scale": 100, + }) + } else if new == containerinsightscommon.GpuMemTotal || new == containerinsightscommon.GpuMemUsed { + operations = append(operations, map[string]interface{}{ + "action": "experimental_scale_value", + "experimental_scale": 1024 * 1024, + }) + } + for _, t := range metricDuplicateTypes { + transformRules = append(transformRules, map[string]interface{}{ + "include": old, + "action": "insert", + "new_name": containerinsightscommon.MetricName(t, new), + "operations": append([]map[string]interface{}{ + { + "action": "add_label", + "new_label": containerinsightscommon.MetricType, + "new_value": t, + }, + }, operations...), + }) + } + } + + for oldName, newName := range renameMapForNeuronMonitor { + var operations []map[string]interface{} + if newName == containerinsightscommon.NeuronCoreUtilization { + operations = append(operations, map[string]interface{}{ + "action": "experimental_scale_value", + "experimental_scale": 100, + }) + } + + transformRules = append(transformRules, map[string]interface{}{ + "include": oldName, + "action": "update", + "new_name": newName, + "operations": 
append([]map[string]interface{}{}, + operations...), + }) + } + } + + c := confmap.NewFromStringMap(map[string]interface{}{ + "transforms": transformRules, }) if err := c.Unmarshal(&cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal into metricstransform config: %w", err) diff --git a/translator/translate/otel/processor/resourcedetection/configs/config.yaml b/translator/translate/otel/processor/resourcedetection/configs/config.yaml index b957ade437..607f9dd719 100644 --- a/translator/translate/otel/processor/resourcedetection/configs/config.yaml +++ b/translator/translate/otel/processor/resourcedetection/configs/config.yaml @@ -3,4 +3,5 @@ override: true timeout: 2s ec2: tags: - - ^kubernetes.io/cluster/.*$ \ No newline at end of file + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName \ No newline at end of file diff --git a/translator/translate/otel/processor/resourcedetection/translator_test.go b/translator/translate/otel/processor/resourcedetection/translator_test.go index d84624937f..177be2ca7d 100644 --- a/translator/translate/otel/processor/resourcedetection/translator_test.go +++ b/translator/translate/otel/processor/resourcedetection/translator_test.go @@ -36,7 +36,7 @@ func TestTranslate(t *testing.T) { "timeout": "2s", "override": true, "ec2": map[string]interface{}{ - "tags": []interface{}{"^kubernetes.io/cluster/.*$"}, + "tags": []interface{}{"^kubernetes.io/cluster/.*$", "^aws:autoscaling:groupName"}, }, }), }, diff --git a/translator/translate/otel/receiver/adapter/translator.go b/translator/translate/otel/receiver/adapter/translator.go index b6193e7c60..d180ef6190 100644 --- a/translator/translate/otel/receiver/adapter/translator.go +++ b/translator/translate/otel/receiver/adapter/translator.go @@ -62,7 +62,8 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: t.cfgKey} } cfg := &adapter.Config{ - ScraperControllerSettings: 
scraperhelper.NewDefaultScraperControllerSettings(t.ID().Type()), + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + AliasName: t.ID().String(), } intervalKeyChain := []string{ diff --git a/translator/translate/otel/receiver/adapter/translator_test.go b/translator/translate/otel/receiver/adapter/translator_test.go index d6c319fa97..1c85750afd 100644 --- a/translator/translate/otel/receiver/adapter/translator_test.go +++ b/translator/translate/otel/receiver/adapter/translator_test.go @@ -17,6 +17,7 @@ import ( ) func TestTranslator(t *testing.T) { + telegrafTestType, _ := component.NewType("telegraf_test") testCases := map[string]struct { input map[string]interface{} cfgName string @@ -31,7 +32,7 @@ func TestTranslator(t *testing.T) { cfgName: "", cfgType: "test", cfgKey: "mem", - wantErr: &common.MissingKeyError{ID: component.NewID("telegraf_test"), JsonKey: "mem"}, + wantErr: &common.MissingKeyError{ID: component.NewID(telegrafTestType), JsonKey: "mem"}, }, "WithoutIntervalInSection": { input: map[string]interface{}{ diff --git a/translator/translate/otel/receiver/adapter/translators.go b/translator/translate/otel/receiver/adapter/translators.go index a60b9fc7d9..5419fbb5c6 100644 --- a/translator/translate/otel/receiver/adapter/translators.go +++ b/translator/translate/otel/receiver/adapter/translators.go @@ -22,6 +22,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/metrics_collect/procstat" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics/metrics_collect/statsd" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" ) const ( @@ -66,6 +67,12 @@ var ( defaultCollectionIntervalMap = map[string]time.Duration{ statsd.SectionKey: 10 * time.Second, } + + // OtelReceivers is used for receivers that need to be in the same pipeline that + // exports to Cloudwatch while not having to follow the adapter 
rules + OtelReceivers = map[string]common.Translator[component.Config]{ + common.OtlpKey: otlp.NewTranslator(otlp.WithDataType(component.DataTypeMetrics)), + } ) // FindReceiversInConfig looks in the metrics and logs sections to determine which @@ -115,6 +122,9 @@ func fromWindowsMetrics(conf *confmap.Conf) common.TranslatorMap[component.Confi translators := common.NewTranslatorMap[component.Config]() if inputs, ok := conf.Get(metricKey).(map[string]interface{}); ok { for inputName := range inputs { + if _, ok := OtelReceivers[inputName]; ok { + continue + } if windowsInputSet.Contains(inputName) { cfgKey := common.ConfigKey(metricKey, inputName) translators.Set(NewTranslator(toAlias(inputName), cfgKey, collections.GetOrDefault( diff --git a/translator/translate/otel/receiver/adapter/translators_test.go b/translator/translate/otel/receiver/adapter/translators_test.go index 267ac24ea0..b3a3e1dfa8 100644 --- a/translator/translate/otel/receiver/adapter/translators_test.go +++ b/translator/translate/otel/receiver/adapter/translators_test.go @@ -19,6 +19,13 @@ import ( // TestFindReceiversInConfig confirms whether the given the agent json configuration // will give the appropriate receivers in the agent yaml func TestFindReceiversInConfig(t *testing.T) { + telegrafSocketListenerType, _ := component.NewType("telegraf_socket_listener") + telegrafCPUType, _ := component.NewType("telegraf_cpu") + telegrafEthtoolType, _ := component.NewType("telegraf_ethtool") + telegrafNvidiaSmiType, _ := component.NewType("telegraf_nvidia_smi") + telegrafStatsdType, _ := component.NewType("telegraf_statsd") + telegrafProcstatType, _ := component.NewType("telegraf_procstat") + telegrafWinPerfCountersType, _ := component.NewType("telegraf_win_perf_counters") type wantResult struct { cfgKey string interval time.Duration @@ -56,13 +63,13 @@ func TestFindReceiversInConfig(t *testing.T) { }, os: translatorconfig.OS_TYPE_LINUX, want: map[component.ID]wantResult{ - 
component.NewID("telegraf_socket_listener"): {"metrics::metrics_collected::collectd", time.Minute}, - component.NewID("telegraf_cpu"): {"metrics::metrics_collected::cpu", time.Minute}, - component.NewID("telegraf_ethtool"): {"metrics::metrics_collected::ethtool", time.Minute}, - component.NewID("telegraf_nvidia_smi"): {"metrics::metrics_collected::nvidia_gpu", time.Minute}, - component.NewID("telegraf_statsd"): {"metrics::metrics_collected::statsd", 10 * time.Second}, - component.NewIDWithName("telegraf_procstat", "793254176"): {"metrics::metrics_collected::procstat", time.Minute}, - component.NewIDWithName("telegraf_procstat", "3599690165"): {"metrics::metrics_collected::procstat", time.Minute}, + component.NewID(telegrafSocketListenerType): {"metrics::metrics_collected::collectd", time.Minute}, + component.NewID(telegrafCPUType): {"metrics::metrics_collected::cpu", time.Minute}, + component.NewID(telegrafEthtoolType): {"metrics::metrics_collected::ethtool", time.Minute}, + component.NewID(telegrafNvidiaSmiType): {"metrics::metrics_collected::nvidia_gpu", time.Minute}, + component.NewID(telegrafStatsdType): {"metrics::metrics_collected::statsd", 10 * time.Second}, + component.NewIDWithName(telegrafProcstatType, "793254176"): {"metrics::metrics_collected::procstat", time.Minute}, + component.NewIDWithName(telegrafProcstatType, "3599690165"): {"metrics::metrics_collected::procstat", time.Minute}, }, }, "WithWindowsMetrics": { @@ -92,13 +99,13 @@ func TestFindReceiversInConfig(t *testing.T) { }, os: translatorconfig.OS_TYPE_WINDOWS, want: map[component.ID]wantResult{ - component.NewID("telegraf_nvidia_smi"): {"metrics::metrics_collected::nvidia_gpu", time.Minute}, - component.NewIDWithName("telegraf_procstat", "793254176"): {"metrics::metrics_collected::procstat", time.Minute}, - component.NewIDWithName("telegraf_procstat", "3599690165"): {"metrics::metrics_collected::procstat", time.Minute}, - component.NewIDWithName("telegraf_win_perf_counters", "4283769065"): 
{"metrics::metrics_collected::LogicalDisk", time.Minute}, - component.NewIDWithName("telegraf_win_perf_counters", "1492679118"): {"metrics::metrics_collected::Memory", time.Minute}, - component.NewIDWithName("telegraf_win_perf_counters", "3610923661"): {"metrics::metrics_collected::Paging File", time.Minute}, - component.NewIDWithName("telegraf_win_perf_counters", "3446270237"): {"metrics::metrics_collected::PhysicalDisk", time.Minute}, + component.NewID(telegrafNvidiaSmiType): {"metrics::metrics_collected::nvidia_gpu", time.Minute}, + component.NewIDWithName(telegrafProcstatType, "793254176"): {"metrics::metrics_collected::procstat", time.Minute}, + component.NewIDWithName(telegrafProcstatType, "3599690165"): {"metrics::metrics_collected::procstat", time.Minute}, + component.NewIDWithName(telegrafWinPerfCountersType, "4283769065"): {"metrics::metrics_collected::LogicalDisk", time.Minute}, + component.NewIDWithName(telegrafWinPerfCountersType, "1492679118"): {"metrics::metrics_collected::Memory", time.Minute}, + component.NewIDWithName(telegrafWinPerfCountersType, "3610923661"): {"metrics::metrics_collected::Paging File", time.Minute}, + component.NewIDWithName(telegrafWinPerfCountersType, "3446270237"): {"metrics::metrics_collected::PhysicalDisk", time.Minute}, }, }, "WithLogs": { @@ -145,7 +152,7 @@ func TestFindReceiversInConfig(t *testing.T) { }, os: translatorconfig.OS_TYPE_LINUX, want: map[component.ID]wantResult{ - component.NewID("telegraf_socket_listener"): {"metrics::metrics_collected::collectd", time.Minute}, + component.NewID(telegrafSocketListenerType): {"metrics::metrics_collected::collectd", time.Minute}, }, }, "WithInvalidOS": { diff --git a/translator/translate/otel/receiver/awscontainerinsight/translator.go b/translator/translate/otel/receiver/awscontainerinsight/translator.go index 8c58c86fce..94360518a6 100644 --- a/translator/translate/otel/receiver/awscontainerinsight/translator.go +++ 
b/translator/translate/otel/receiver/awscontainerinsight/translator.go @@ -108,9 +108,11 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.PrefFullPodName = true cfg.EnableControlPlaneMetrics = true } + } cfg.PrefFullPodName = cfg.PrefFullPodName || common.GetOrDefaultBool(conf, common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.KubernetesKey, common.PreferFullPodName), false) + cfg.EnableAcceleratedComputeMetrics = cfg.EnableAcceleratedComputeMetrics || AcceleratedComputeMetricsEnabled(conf) return cfg, nil } diff --git a/translator/translate/otel/receiver/awscontainerinsight/granularity.go b/translator/translate/otel/receiver/awscontainerinsight/utils.go similarity index 77% rename from translator/translate/otel/receiver/awscontainerinsight/granularity.go rename to translator/translate/otel/receiver/awscontainerinsight/utils.go index e0d364081b..721951b056 100644 --- a/translator/translate/otel/receiver/awscontainerinsight/granularity.go +++ b/translator/translate/otel/receiver/awscontainerinsight/utils.go @@ -23,3 +23,7 @@ func EnhancedContainerInsightsEnabled(conf *confmap.Conf) bool { } return isSet } + +func AcceleratedComputeMetricsEnabled(conf *confmap.Conf) bool { + return common.GetOrDefaultBool(conf, common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.KubernetesKey, common.EnableAcceleratedComputeMetric), true) +} diff --git a/translator/translate/otel/receiver/awsxray/testdata/config.yaml b/translator/translate/otel/receiver/awsxray/testdata/config.yaml index ded24bcfc2..487cbd5700 100644 --- a/translator/translate/otel/receiver/awsxray/testdata/config.yaml +++ b/translator/translate/otel/receiver/awsxray/testdata/config.yaml @@ -6,4 +6,5 @@ proxy_server: role_arn: trace_role_arn tls: insecure: true - proxy_address: https://proxy.proxy.com \ No newline at end of file + proxy_address: https://proxy.proxy.com + imds_retries: 1 \ No newline at end of file diff --git 
a/translator/translate/otel/receiver/awsxray/translator.go b/translator/translate/otel/receiver/awsxray/translator.go index 9dad0917bf..36834a9b3a 100644 --- a/translator/translate/otel/receiver/awsxray/translator.go +++ b/translator/translate/otel/receiver/awsxray/translator.go @@ -4,11 +4,18 @@ package awsxray import ( + "fmt" + "os" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/receiver" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" + "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -61,15 +68,26 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { if insecure, ok := common.GetBool(conf, common.ConfigKey(common.TracesKey, common.InsecureKey)); ok { cfg.ProxyServer.TLSSetting.Insecure = insecure } + if context.CurrentContext().Mode() == config.ModeOnPrem || context.CurrentContext().Mode() == config.ModeOnPremise { + cfg.ProxyServer.LocalMode = true + } if localMode, ok := common.GetBool(conf, common.ConfigKey(common.TracesKey, common.LocalModeKey)); ok { cfg.ProxyServer.LocalMode = localMode } + if profileKey, ok := agent.Global_Config.Credentials[agent.Profile_Key]; ok { + cfg.ProxyServer.Profile = fmt.Sprintf("%v", profileKey) + } if endpoint, ok := common.GetString(conf, common.ConfigKey(common.TracesKey, common.EndpointOverrideKey)); ok { cfg.ProxyServer.AWSEndpoint = endpoint } if proxyAddress, ok := common.GetString(conf, common.ConfigKey(common.TracesKey, common.ProxyOverrideKey)); ok { cfg.ProxyServer.ProxyAddress = proxyAddress } + if credentialsFileKey, ok := 
agent.Global_Config.Credentials[agent.CredentialsFile_Key]; ok { + cfg.ProxyServer.SharedCredentialsFile = []string{fmt.Sprintf("%v", credentialsFileKey)} + } + cfg.ProxyServer.CertificateFilePath = os.Getenv(envconfig.AWS_CA_BUNDLE) + cfg.ProxyServer.IMDSRetries = retryer.GetDefaultRetryNumber() return cfg, nil } diff --git a/translator/translate/otel/receiver/awsxray/translator_test.go b/translator/translate/otel/receiver/awsxray/translator_test.go index d53da52a9b..f50d52a33b 100644 --- a/translator/translate/otel/receiver/awsxray/translator_test.go +++ b/translator/translate/otel/receiver/awsxray/translator_test.go @@ -41,9 +41,10 @@ func TestTranslator(t *testing.T) { "endpoint": "127.0.0.1:2000", "transport": "udp", "proxy_server": map[string]interface{}{ - "endpoint": "127.0.0.1:2000", - "region": "us-east-1", - "role_arn": "global_arn", + "endpoint": "127.0.0.1:2000", + "region": "us-east-1", + "role_arn": "global_arn", + "imds_retries": 1, }, }), }, diff --git a/translator/translate/otel/receiver/otlp/appsignals_config.yaml b/translator/translate/otel/receiver/otlp/appsignals_config.yaml deleted file mode 100644 index 560470c3a2..0000000000 --- a/translator/translate/otel/receiver/otlp/appsignals_config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -protocols: - grpc: - endpoint: 0.0.0.0:4315 - http: - endpoint: 0.0.0.0:4316 \ No newline at end of file diff --git a/translator/translate/otel/receiver/otlp/translator.go b/translator/translate/otel/receiver/otlp/translator.go index 48c86c0fd3..3add9e8c06 100644 --- a/translator/translate/otel/receiver/otlp/translator.go +++ b/translator/translate/otel/receiver/otlp/translator.go @@ -6,8 +6,10 @@ package otlp import ( _ "embed" "fmt" + "strconv" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" @@ -16,23 +18,24 @@ import ( ) const ( - 
defaultGrpcEndpoint = "127.0.0.1:4317" - defaultHttpEndpoint = "127.0.0.1:4318" + defaultGrpcEndpoint = "127.0.0.1:4317" + defaultHttpEndpoint = "127.0.0.1:4318" + defaultAppSignalsGrpcEndpoint = "0.0.0.0:4315" + defaultAppSignalsHttpEndpoint = "0.0.0.0:4316" ) var ( configKeys = map[component.DataType]string{ - component.DataTypeTraces: common.ConfigKey(common.TracesKey, common.TracesCollectedKey, common.OtlpKey), + component.DataTypeTraces: common.ConfigKey(common.TracesKey, common.TracesCollectedKey), + component.DataTypeMetrics: common.ConfigKey(common.LogsKey, common.MetricsCollectedKey), } - - //go:embed appsignals_config.yaml - appSignalsConfig string ) type translator struct { - name string - dataType component.DataType - factory receiver.Factory + name string + dataType component.DataType + instanceNum int + factory receiver.Factory } type Option interface { @@ -52,22 +55,29 @@ func WithDataType(dataType component.DataType) Option { t.dataType = dataType }) } +func WithInstanceNum(instanceNum int) Option { + return optionFunc(func(t *translator) { + t.instanceNum = instanceNum + }) +} var _ common.Translator[component.Config] = (*translator)(nil) func NewTranslator(opts ...Option) common.Translator[component.Config] { - t := &translator{factory: otlpreceiver.NewFactory()} - for _, opt := range opts { - opt.apply(t) - } - return t + return NewTranslatorWithName("", opts...) 
} func NewTranslatorWithName(name string, opts ...Option) common.Translator[component.Config] { - t := &translator{name: name, factory: otlpreceiver.NewFactory()} + t := &translator{name: name, instanceNum: -1, factory: otlpreceiver.NewFactory()} for _, opt := range opts { opt.apply(t) } + if name == "" && t.dataType.String() != "" { + t.name = t.dataType.String() + if t.instanceNum != -1 { + t.name += strconv.Itoa(t.instanceNum) + } + } return t } @@ -77,26 +87,49 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*otlpreceiver.Config) - - // TODO: Should follow pattern done in awsemf and awsexray exporter translations (i.e should be integrated with standard otlp translation) - if t.name == common.AppSignals { - return common.GetYamlFileToYamlConfig(cfg, appSignalsConfig) - } - - configKey, ok := configKeys[t.dataType] + // init default configuration + configBase, ok := configKeys[t.dataType] if !ok { return nil, fmt.Errorf("no config key defined for data type: %s", t.dataType) } + configKey := common.ConfigKey(configBase, common.OtlpKey) + cfg.GRPC.NetAddr.Endpoint = defaultGrpcEndpoint + cfg.HTTP.Endpoint = defaultHttpEndpoint + if t.name == common.AppSignals { + configKey = common.ConfigKey(configKeys[t.dataType], common.AppSignals) + if conf == nil || !conf.IsSet(configKey) { + configKey = common.ConfigKey(configBase, common.AppSignalsFallback) + } + cfg.GRPC.NetAddr.Endpoint = defaultAppSignalsGrpcEndpoint + cfg.HTTP.Endpoint = defaultAppSignalsHttpEndpoint + } if conf == nil || !conf.IsSet(configKey) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey} } - cfg.GRPC.NetAddr.Endpoint = defaultGrpcEndpoint - cfg.HTTP.Endpoint = defaultHttpEndpoint - if endpoint, ok := common.GetString(conf, common.ConfigKey(configKey, "grpc_endpoint")); ok { - cfg.GRPC.NetAddr.Endpoint = endpoint + + var otlpKeyMap map[string]interface{} + if 
otlpSlice := common.GetArray[any](conf, configKey); t.instanceNum != -1 && len(otlpSlice) > t.instanceNum { + otlpKeyMap = otlpSlice[t.instanceNum].(map[string]interface{}) + } else { + otlpKeyMap = conf.Get(configKey).(map[string]interface{}) + } + var tlsSettings *configtls.ServerConfig + if tls, ok := otlpKeyMap["tls"].(map[string]interface{}); ok { + tlsSettings = &configtls.ServerConfig{} + tlsSettings.CertFile = tls["cert_file"].(string) + tlsSettings.KeyFile = tls["key_file"].(string) + } + cfg.GRPC.TLSSetting = tlsSettings + cfg.HTTP.TLSSetting = tlsSettings + + grpcEndpoint, grpcOk := otlpKeyMap["grpc_endpoint"] + httpEndpoint, httpOk := otlpKeyMap["http_endpoint"] + + if grpcOk { + cfg.GRPC.NetAddr.Endpoint = grpcEndpoint.(string) } - if endpoint, ok := common.GetString(conf, common.ConfigKey(configKey, "http_endpoint")); ok { - cfg.HTTP.Endpoint = endpoint + if httpOk { + cfg.HTTP.Endpoint = httpEndpoint.(string) } return cfg, nil } diff --git a/translator/translate/otel/receiver/otlp/translator_test.go b/translator/translate/otel/receiver/otlp/translator_test.go index 06a6672a1e..71f1bf6501 100644 --- a/translator/translate/otel/receiver/otlp/translator_test.go +++ b/translator/translate/otel/receiver/otlp/translator_test.go @@ -40,7 +40,7 @@ func TestTracesTranslator(t *testing.T) { }, }, "WithDefault": { - input: map[string]interface{}{"traces": map[string]interface{}{"traces_collected": map[string]interface{}{"otlp": nil}}}, + input: map[string]interface{}{"traces": map[string]interface{}{"traces_collected": map[string]interface{}{"otlp": map[string]interface{}{}}}}, want: confmap.NewFromStringMap(map[string]interface{}{ "protocols": map[string]interface{}{ "grpc": map[string]interface{}{ @@ -52,6 +52,25 @@ func TestTracesTranslator(t *testing.T) { }, }), }, + "WithTLS": { + input: map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "127.0.0.1:4317", + }, + "http": map[string]interface{}{ + 
"endpoint": "127.0.0.1:4318", + }, + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }}, + wantErr: &common.MissingKeyError{ + ID: tt.ID(), + JsonKey: common.ConfigKey(common.TracesKey, common.TracesCollectedKey, common.OtlpKey), + }, + }, "WithCompleteConfig": { input: testutil.GetJson(t, filepath.Join("testdata", "traces", "config.json")), want: testutil.GetConf(t, filepath.Join("testdata", "traces", "config.yaml")), @@ -83,6 +102,55 @@ func TestTranslateAppSignals(t *testing.T) { wantErr error }{ "WithAppSignalsEnabledTraces": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "0.0.0.0:4315", + }, + "http": map[string]interface{}{ + "endpoint": "0.0.0.0:4316", + }, + }, + }), + }, + "WithAppSignalsEnabledTracesWithTLS": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{ + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "0.0.0.0:4315", + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + "http": map[string]interface{}{ + "endpoint": "0.0.0.0:4316", + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + }, + }), + }, + "WithAppSignalsFallbackEnabledTraces": { input: map[string]interface{}{ "traces": map[string]interface{}{ "traces_collected": map[string]interface{}{ @@ -100,6 +168,37 @@ func 
TestTranslateAppSignals(t *testing.T) { }, }), }, + "WithAppSignalsFallbackEnabledTracesWithTLS": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{ + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "0.0.0.0:4315", + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + "http": map[string]interface{}{ + "endpoint": "0.0.0.0:4316", + "tls": map[string]interface{}{ + "cert_file": "path/to/cert.crt", + "key_file": "path/to/key.key", + }, + }, + }, + }), + }, } factory := otlpreceiver.NewFactory() for name, testCase := range testCases { diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index d5731620f4..79649dfa45 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -22,7 +22,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/appsignals" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/applicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/emf_logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/host" @@ -66,10 +66,9 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { hostReceivers.Set(translator) } }) - translators := 
common.NewTranslatorMap( - appsignals.NewTranslator(component.DataTypeTraces), - appsignals.NewTranslator(component.DataTypeMetrics), + applicationsignals.NewTranslator(component.DataTypeTraces), + applicationsignals.NewTranslator(component.DataTypeMetrics), host.NewTranslator(common.PipelineNameHost, hostReceivers), host.NewTranslator(common.PipelineNameHostDeltaMetrics, deltaMetricsReceivers), containerinsights.NewTranslator(), diff --git a/translator/translate/otel/translate_otel_test.go b/translator/translate/otel/translate_otel_test.go index 05978cfdcf..21a54bcfab 100644 --- a/translator/translate/otel/translate_otel_test.go +++ b/translator/translate/otel/translate_otel_test.go @@ -15,6 +15,7 @@ import ( _ "github.com/aws/amazon-cloudwatch-agent/translator/registerrules" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) func TestTranslator(t *testing.T) { @@ -22,7 +23,8 @@ func TestTranslator(t *testing.T) { testCases := map[string]struct { input interface{} wantErrContains string - detector func() (common.Detector, error) + detector func() (eksdetector.Detector, error) + isEKSDataStore func() eksdetector.IsEKSCache }{ "WithInvalidConfig": { input: "", @@ -51,41 +53,121 @@ func TestTranslator(t *testing.T) { input: map[string]interface{}{ "logs": map[string]interface{}{ "metrics_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, + "application_signals": map[string]interface{}{}, }, }, }, - detector: common.TestEKSDetector, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, }, "WithAppSignalsTracesEnabled": { input: map[string]interface{}{ "traces": map[string]interface{}{ "traces_collected": map[string]interface{}{ - "app_signals": map[string]interface{}{}, + "application_signals": map[string]interface{}{}, }, }, }, - detector: 
common.TestEKSDetector, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, }, "WithAppSignalsMetricsAndTracesEnabled": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + }, + }, + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + }, + }, + }, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsMultipleMetricsReceiversConfig": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + "cpu": map[string]interface{}{}, + }, + }, + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "application_signals": map[string]interface{}{}, + "otlp": map[string]interface{}{}, + "otlp2": map[string]interface{}{}, + }, + }, + }, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsFallbackMetricsEnabled": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsFallbackTracesEnabled": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsFallbackMetricsAndTracesEnabled": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + 
"traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, + }, + "WithAppSignalsFallbackMultipleMetricsReceiversConfig": { input: map[string]interface{}{ "logs": map[string]interface{}{ "metrics_collected": map[string]interface{}{ "app_signals": map[string]interface{}{}, + "cpu": map[string]interface{}{}, }, }, "traces": map[string]interface{}{ "traces_collected": map[string]interface{}{ "app_signals": map[string]interface{}{}, + "otlp": map[string]interface{}{}, + "otlp2": map[string]interface{}{}, }, }, }, - detector: common.TestEKSDetector, + detector: eksdetector.TestEKSDetector, + isEKSDataStore: eksdetector.TestIsEKSCacheEKS, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - common.NewDetector = testCase.detector + eksdetector.NewDetector = testCase.detector + eksdetector.IsEKS = testCase.isEKSDataStore translator.SetTargetPlatform("linux") got, err := Translate(testCase.input, "linux") if testCase.wantErrContains != "" { @@ -117,15 +199,16 @@ func (t testTranslator) ID() component.ID { var _ common.Translator[*common.ComponentTranslators] = (*testTranslator)(nil) func TestRegisterPipeline(t *testing.T) { - original := &testTranslator{id: component.NewID("test"), version: 1} + testType, _ := component.NewType("test") + original := &testTranslator{id: component.NewID(testType), version: 1} tm := common.NewTranslatorMap[*common.ComponentTranslators](original) assert.Equal(t, 0, registry.Len()) - first := &testTranslator{id: component.NewID("test"), version: 2} - second := &testTranslator{id: component.NewID("test"), version: 3} + first := &testTranslator{id: component.NewID(testType), version: 2} + second := &testTranslator{id: component.NewID(testType), version: 3} RegisterPipeline(first, second) assert.Equal(t, 1, registry.Len()) tm.Merge(registry) - got, ok := 
tm.Get(component.NewID("test")) + got, ok := tm.Get(component.NewID(testType)) assert.True(t, ok) assert.Equal(t, second.version, got.(*testTranslator).version) assert.NotEqual(t, first.version, got.(*testTranslator).version) diff --git a/translator/translate/util/tagsutil.go b/translator/translate/util/tagsutil.go index f1bba7509c..f3eca6701b 100644 --- a/translator/translate/util/tagsutil.go +++ b/translator/translate/util/tagsutil.go @@ -3,22 +3,30 @@ package util +import ( + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" +) + const ( High_Resolution_Tag_Key = "aws:StorageResolution" Aggregation_Interval_Tag_Key = "aws:AggregationInterval" ) -var Reserved_Tag_Keys = []string{High_Resolution_Tag_Key, Aggregation_Interval_Tag_Key} +var ReservedTagKeySet = collections.NewSet[string](High_Resolution_Tag_Key, Aggregation_Interval_Tag_Key, ec2tagger.AttributeVolumeId) func AddHighResolutionTag(tags interface{}) { tagMap := tags.(map[string]interface{}) tagMap[High_Resolution_Tag_Key] = "true" } -// Filter out reserved tag keys -func Cleanup(input interface{}) { - inputmap := input.(map[string]interface{}) - for _, reserved_key := range Reserved_Tag_Keys { - delete(inputmap, reserved_key) +// FilterReservedKeys out reserved tag keys +func FilterReservedKeys(input any) any { + result := map[string]any{} + for k, v := range input.(map[string]interface{}) { + if !ReservedTagKeySet.Contains(k) { + result[k] = v + } } + return result } diff --git a/translator/util/ec2util/ec2util.go b/translator/util/ec2util/ec2util.go index 5da0790d23..480a08c8b8 100644 --- a/translator/util/ec2util/ec2util.go +++ b/translator/util/ec2util/ec2util.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" - "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/provider" + 
"github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -116,7 +116,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { hostnameInner, errInner := mdEnableFallback.GetMetadata("hostname") if errInner == nil { e.Hostname = hostnameInner - provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) + agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } else { fmt.Println("E! [EC2] Fetch hostname from EC2 metadata fail:", errInner) } @@ -136,7 +136,7 @@ func (e *ec2Util) deriveEC2MetadataFromIMDS() error { e.AccountID = instanceIdentityDocumentInner.AccountID e.PrivateIP = instanceIdentityDocumentInner.PrivateIP e.InstanceID = instanceIdentityDocumentInner.InstanceID - provider.GetFlagsStats().SetFlag(provider.FlagIMDSFallbackSucceed) + agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } else { fmt.Println("E! [EC2] Fetch identity document from EC2 metadata fail:", errInner) } diff --git a/translator/translate/otel/common/eksdetector.go b/translator/util/eksdetector/eksdetector.go similarity index 65% rename from translator/translate/otel/common/eksdetector.go rename to translator/util/eksdetector/eksdetector.go index de004e4e49..58830883d6 100644 --- a/translator/translate/otel/common/eksdetector.go +++ b/translator/util/eksdetector/eksdetector.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package common +package eksdetector import ( "context" @@ -21,6 +21,11 @@ type EksDetector struct { Clientset kubernetes.Interface } +type IsEKSCache struct { + Value bool + Err error +} + const ( authConfigNamespace = "kube-system" authConfigConfigMap = "aws-auth" @@ -29,8 +34,9 @@ const ( var _ Detector = (*EksDetector)(nil) var ( - detectorSingleton Detector - once sync.Once + detector Detector + isEKSCacheSingleton IsEKSCache + once sync.Once ) var ( @@ -39,36 +45,41 @@ var ( // NewDetector creates a new singleton detector for EKS NewDetector = func() (Detector, error) { var errors error + if clientset, err := getClient(); err != nil { + errors = err + } else { + detector = &EksDetector{Clientset: clientset} + } + + return detector, errors + } + + // IsEKS checks if the agent is running on EKS. This is done by using the kubernetes API to determine if the aws-auth + // configmap exists in the kube-system namespace + IsEKS = func() IsEKSCache { once.Do(func() { - if clientset, err := getClient(); err != nil { + var errors error + var value bool + // Create eks detector + eksDetector, err := NewDetector() + if err != nil { errors = err - } else { - detectorSingleton = &EksDetector{Clientset: clientset} } + + if eksDetector != nil { + // Make HTTP GET request + awsAuth, err := eksDetector.getConfigMap(authConfigNamespace, authConfigConfigMap) + if err == nil { + value = awsAuth != nil + } + } + isEKSCacheSingleton = IsEKSCache{Value: value, Err: errors} }) - return detectorSingleton, errors + return isEKSCacheSingleton } ) -// IsEKS checks if the agent is running on EKS. 
This is done by using the kubernetes API to determine if the aws-auth -// configmap exists in the kube-system namespace -func IsEKS() (bool, error) { - // Create eks detector - eksDetector, err := NewDetector() - if err != nil { - return false, err - } - - // Make HTTP GET request - awsAuth, err := eksDetector.getConfigMap(authConfigNamespace, authConfigConfigMap) - if err != nil { - return false, nil - } - - return awsAuth != nil, nil -} - // getConfigMap retrieves the configmap with the provided name in the provided namespace func (d *EksDetector) getConfigMap(namespace string, name string) (map[string]string, error) { configMap, err := d.Clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/translator/translate/otel/common/eksdetector_test.go b/translator/util/eksdetector/eksdetector_test.go similarity index 73% rename from translator/translate/otel/common/eksdetector_test.go rename to translator/util/eksdetector/eksdetector_test.go index 3dfee8fa75..a5dafaf446 100644 --- a/translator/translate/otel/common/eksdetector_test.go +++ b/translator/util/eksdetector/eksdetector_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package common +package eksdetector import ( "fmt" @@ -25,45 +25,38 @@ func TestNewDetector(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, testDetector1) - // Test singleton - testDetector2, err := NewDetector() - assert.NoError(t, err) - assert.True(t, testDetector1 == testDetector2) + getInClusterConfig = func() (*rest.Config, error) { + return nil, fmt.Errorf("error") + } + _, err = NewDetector() + assert.Error(t, err) } -// Tests EKS resource detector running in EKS environment -func TestEKS(t *testing.T) { - testDetector := new(MockDetector) - NewDetector = func() (Detector, error) { - return testDetector, nil +func TestIsEKSSingleton(t *testing.T) { + getInClusterConfig = func() (*rest.Config, error) { + return &rest.Config{}, nil } - testDetector.On("getConfigMap", authConfigNamespace, authConfigConfigMap).Return(map[string]string{conventions.AttributeK8SClusterName: "my-cluster"}, nil) - isEks, err := IsEKS() - assert.True(t, isEks) - assert.NoError(t, err) + NewDetector = TestEKSDetector + value1 := IsEKS() + assert.NoError(t, value1.Err) + value2 := IsEKS() + assert.NoError(t, value2.Err) + + assert.True(t, value1 == value2) } -// Tests EKS resource detector not running in EKS environment by verifying resource is not running on k8s -func TestNotEKS(t *testing.T) { +// Tests EKS resource detector running in EKS environment +func TestEKS(t *testing.T) { testDetector := new(MockDetector) - - // Detector creation failure - NewDetector = func() (Detector, error) { - return nil, fmt.Errorf("test error") - } - isEks, err := IsEKS() - assert.False(t, isEks) - assert.Error(t, err) - - //get configmap failure NewDetector = func() (Detector, error) { return testDetector, nil } - testDetector.On("getConfigMap", authConfigNamespace, authConfigConfigMap).Return(map[string]string{}, fmt.Errorf("error")) - isEks, err = IsEKS() - assert.False(t, isEks) + testDetector.On("getConfigMap", authConfigNamespace, 
authConfigConfigMap).Return(map[string]string{conventions.AttributeK8SClusterName: "my-cluster"}, nil) + isEks := IsEKS() + assert.True(t, isEks.Value) + assert.NoError(t, isEks.Err) } func Test_getConfigMap(t *testing.T) { diff --git a/translator/translate/otel/common/eksdetectortestutil.go b/translator/util/eksdetector/eksdetectortestutil.go similarity index 77% rename from translator/translate/otel/common/eksdetectortestutil.go rename to translator/util/eksdetector/eksdetectortestutil.go index 3845754eb3..c3b14a48c5 100644 --- a/translator/translate/otel/common/eksdetectortestutil.go +++ b/translator/util/eksdetector/eksdetectortestutil.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package common +package eksdetector import ( "github.com/stretchr/testify/mock" @@ -24,6 +24,16 @@ var ( TestK8sDetector = func() (Detector, error) { return &EksDetector{Clientset: fake.NewSimpleClientset()}, nil } + + // TestIsEKSCacheEKS os used for unit testing EKS route + TestIsEKSCacheEKS = func() IsEKSCache { + return IsEKSCache{Value: true, Err: nil} + } + + // TestIsEKSCacheK8s is used for unit testing K8s route + TestIsEKSCacheK8s = func() IsEKSCache { + return IsEKSCache{Value: false, Err: nil} + } ) type MockDetector struct { diff --git a/translator/util/sdkutil.go b/translator/util/sdkutil.go index c17fdec5cc..d0bb20510b 100644 --- a/translator/util/sdkutil.go +++ b/translator/util/sdkutil.go @@ -17,6 +17,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/util/ec2util" "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) const ( @@ -27,6 +28,7 @@ var DetectRegion = detectRegion var DetectCredentialsPath = detectCredentialsPath var DefaultEC2Region = defaultEC2Region var DefaultECSRegion = defaultECSRegion +var IsEKS = isEKS var runInAws = 
os.Getenv(config.RUN_IN_AWS) var runWithIrsa = os.Getenv(config.RUN_WITH_IRSA) @@ -59,6 +61,25 @@ func DetectAgentMode(configuredMode string) string { return config.ModeOnPrem } +func DetectKubernetesMode(configuredMode string) string { + isEKS := IsEKS() + + if isEKS.Err != nil { + return "" // not kubernetes + } + + if isEKS.Value { + return config.ModeEKS + } + + if configuredMode == config.ModeEC2 { + return config.ModeK8sEC2 + } + + return config.ModeK8sOnPrem + +} + func SDKRegionWithCredsMap(mode string, credsConfig map[string]string) (region string) { credsMap := GetCredentials(mode, credsConfig) @@ -97,6 +118,10 @@ func defaultECSRegion() string { return ecsutil.GetECSUtilSingleton().Region } +func isEKS() eksdetector.IsEKSCache { + return eksdetector.IsEKS() +} + func detectRegion(mode string, credsConfig map[string]string) (region string, regionType string) { region = SDKRegionWithCredsMap(mode, credsConfig) regionType = config.RegionTypeNotFound @@ -106,6 +131,7 @@ func detectRegion(mode string, credsConfig map[string]string) (region string, re // For ec2, fallback to metadata when no region info found in credential profile. if region == "" && mode == config.ModeEC2 { + fmt.Println("I! 
Trying to detect region from ec2") region = DefaultEC2Region() regionType = config.RegionTypeEC2Metadata diff --git a/translator/util/sdkutil_test.go b/translator/util/sdkutil_test.go index eb6bdeec25..94d4d4f3c6 100644 --- a/translator/util/sdkutil_test.go +++ b/translator/util/sdkutil_test.go @@ -4,11 +4,13 @@ package util import ( + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) func TestDetectAgentModeAuto(t *testing.T) { @@ -32,3 +34,25 @@ func TestDetectAgentModeAuto(t *testing.T) { }) } } + +func TestDetectKubernetesMode(t *testing.T) { + testCases := map[string]struct { + isEKS bool + isEKSErr error + configuredMode string + wantKubernetesMode string + }{ + "EKS": {isEKS: true, isEKSErr: nil, configuredMode: config.ModeEC2, wantKubernetesMode: config.ModeEKS}, + "K8sEC2": {isEKS: false, isEKSErr: nil, configuredMode: config.ModeEC2, wantKubernetesMode: config.ModeK8sEC2}, + "K8sOnPrem": {isEKS: false, isEKSErr: nil, configuredMode: config.ModeOnPrem, wantKubernetesMode: config.ModeK8sOnPrem}, + "NotKubernetes": {isEKS: false, isEKSErr: fmt.Errorf("error"), configuredMode: config.ModeEC2, wantKubernetesMode: ""}, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + IsEKS = func() eksdetector.IsEKSCache { + return eksdetector.IsEKSCache{Value: testCase.isEKS, Err: testCase.isEKSErr} + } + require.Equal(t, testCase.wantKubernetesMode, DetectKubernetesMode(testCase.configuredMode)) + }) + } +} From ae2f107dc2b1e2d3fd657881d650982dec0b0f5b Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Fri, 31 May 2024 10:26:28 -0400 Subject: [PATCH 16/55] Adds ResourceId struct for CW Logs (#703) --- logs/logs.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/logs/logs.go b/logs/logs.go index 21241c8e71..9e5cdbedcf 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -72,6 +72,11 @@ func NewLogAgent(c 
*config.Config) *LogAgent { } } +type ResourceID struct { + KeyAttributes map[string]string + AttributeMap map[string]string +} + // Run LogAgent will scan all input and output plugins for LogCollection and LogBackend. // And connect all the LogSrc from the LogCollection found to the respective LogDest // based on the configured "destination", and "name" From e29b4283285eefa3edfecd2cd93a26f68722c95e Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 3 Jun 2024 18:18:53 -0400 Subject: [PATCH 17/55] Set up resource map with basic ec2, ecs, and eks attributes (#707) --- internal/resourcemap/resourcemap.go | 67 ++++++++ internal/resourcemap/resourcemap_test.go | 192 +++++++++++++++++++++++ 2 files changed, 259 insertions(+) create mode 100644 internal/resourcemap/resourcemap.go create mode 100644 internal/resourcemap/resourcemap_test.go diff --git a/internal/resourcemap/resourcemap.go b/internal/resourcemap/resourcemap.go new file mode 100644 index 0000000000..81a019442e --- /dev/null +++ b/internal/resourcemap/resourcemap.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcemap + +var resourceMap *ResourceMap + +type ec2Info struct { + InstanceID string + AutoScalingGroup string +} + +type ecsInfo struct { + ClusterName string +} + +type eksInfo struct { + ClusterName string +} + +type ResourceMap struct { + // mode should be EC2, ECS, EKS, and K8S + mode string + + // ec2Info stores information about EC2 instances such as instance ID and + // auto scaling groups + ec2Info ec2Info + + // ecsInfo stores information about ECS such as cluster name + // TODO: This struct may need to be expanded to include task role arn and more + ecsInfo ecsInfo + + // ekeInfo stores information about EKS such as cluster + // TODO: This struct may need to be expanded to include namespace, pod, node, etc + eksInfo eksInfo + + // This variable is reserved for communication between OTEL components and LogAgent + // in order to achieve process correlations + logFiles map[string]string +} + +func GetResourceMap() *ResourceMap { + if resourceMap == nil { + InitResourceMap() + } + return resourceMap +} + +func InitResourceMap() { + // Add logic to store attributes such as instance ID, cluster name, etc here +} + +func (r *ResourceMap) LogFiles() map[string]string { + return r.logFiles +} + +func (r *ResourceMap) EC2Info() ec2Info { + return r.ec2Info +} + +func (r *ResourceMap) ECSInfo() ecsInfo { + return r.ecsInfo +} + +func (r *ResourceMap) EKSInfo() eksInfo { + return r.eksInfo +} diff --git a/internal/resourcemap/resourcemap_test.go b/internal/resourcemap/resourcemap_test.go new file mode 100644 index 0000000000..317ecb9b51 --- /dev/null +++ b/internal/resourcemap/resourcemap_test.go @@ -0,0 +1,192 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcemap + +import ( + "reflect" + "testing" +) + +func resetResourceMap() { + resourceMap = nil +} + +func TestGetResourceMap(t *testing.T) { + tests := []struct { + name string + want *ResourceMap + }{ + { + name: "happypath", + want: resourceMap, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetResourceMap(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetResourceMap() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestInitResourceMap(t *testing.T) { + tests := []struct { + name string + }{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + InitResourceMap() + }) + } +} + +func TestResourceMap_EC2Info(t *testing.T) { + type fields struct { + mode string + ec2Info ec2Info + ecsInfo ecsInfo + eksInfo eksInfo + logFiles map[string]string + } + tests := []struct { + name string + fields fields + want ec2Info + }{ + { + name: "happypath", + fields: fields{ + ec2Info: ec2Info{InstanceID: "i-1234567890", AutoScalingGroup: "test-asg"}, + }, + want: ec2Info{ + InstanceID: "i-1234567890", + AutoScalingGroup: "test-asg", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceMap{ + mode: tt.fields.mode, + ec2Info: tt.fields.ec2Info, + ecsInfo: tt.fields.ecsInfo, + eksInfo: tt.fields.eksInfo, + logFiles: tt.fields.logFiles, + } + if got := r.EC2Info(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("EC2Info() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceMap_ECSInfo(t *testing.T) { + type fields struct { + mode string + ec2Info ec2Info + ecsInfo ecsInfo + eksInfo eksInfo + logFiles map[string]string + } + tests := []struct { + name string + fields fields + want ecsInfo + }{ + { + name: "happypath", + fields: fields{ + ecsInfo: ecsInfo{ClusterName: "test-cluster"}, + }, + want: ecsInfo{ + ClusterName: "test-cluster", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
r := &ResourceMap{ + mode: tt.fields.mode, + ec2Info: tt.fields.ec2Info, + ecsInfo: tt.fields.ecsInfo, + eksInfo: tt.fields.eksInfo, + logFiles: tt.fields.logFiles, + } + if got := r.ECSInfo(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ECSInfo() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceMap_EKSInfo(t *testing.T) { + type fields struct { + mode string + ec2Info ec2Info + ecsInfo ecsInfo + eksInfo eksInfo + logFiles map[string]string + } + tests := []struct { + name string + fields fields + want eksInfo + }{ + { + name: "happypath", + fields: fields{ + eksInfo: eksInfo{ClusterName: "test-cluster"}, + }, + want: eksInfo{ + ClusterName: "test-cluster", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceMap{ + mode: tt.fields.mode, + ec2Info: tt.fields.ec2Info, + ecsInfo: tt.fields.ecsInfo, + eksInfo: tt.fields.eksInfo, + logFiles: tt.fields.logFiles, + } + if got := r.EKSInfo(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("EKSInfo() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceMap_LogFiles(t *testing.T) { + type fields struct { + mode string + ec2Info ec2Info + ecsInfo ecsInfo + eksInfo eksInfo + logFiles map[string]string + } + tests := []struct { + name string + fields fields + want map[string]string + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceMap{ + mode: tt.fields.mode, + ec2Info: tt.fields.ec2Info, + ecsInfo: tt.fields.ecsInfo, + eksInfo: tt.fields.eksInfo, + logFiles: tt.fields.logFiles, + } + if got := r.LogFiles(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("LogFiles() = %v, want %v", got, tt.want) + } + }) + } +} From 954cda45fb4fa811165ec3add1b1b3291a581e70 Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 4 Jun 2024 11:57:51 -0400 Subject: [PATCH 18/55] Fetches AWS SDK from S3 and builds for PR requests (#708) --- .github/workflows/PR-build.yml | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/.github/workflows/PR-build.yml b/.github/workflows/PR-build.yml index a8d2fb7af2..448b48f687 100644 --- a/.github/workflows/PR-build.yml +++ b/.github/workflows/PR-build.yml @@ -2,6 +2,10 @@ # SPDX-License-Identifier: MIT name: PR Build +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours + on: workflow_dispatch: pull_request: @@ -72,6 +76,9 @@ jobs: needs: [lint, changes] name: Build ${{ matrix.os }} runs-on: ${{ matrix.os }} + permissions: + id-token: write + contents: read strategy: fail-fast: false matrix: @@ -98,6 +105,13 @@ jobs: ~\AppData\Local\go-build ~\go\pkg\mod steps: + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-east-1 + - name: Set up Go 1.x if: needs.changes.outputs.build == 'true' uses: actions/setup-go@v4 @@ -128,6 +142,30 @@ jobs: if: matrix.family == 'windows' && steps.cached_binaries.outputs.cache-hit != 'true' && needs.changes.outputs.build == 'true' run: choco install make +# TODO: remove "Replace AWS SDK" once changes are available publicly + - name: Replace AWS SDK (Windows) + if: matrix.family == 'windows' + run: | + mkdir C:/Users/runneradmin/gosdk + aws s3 
cp s3://compass-pre-release/staging.zip $env:SYSTEMROOT + tar -xf $env:SYSTEMROOT/staging.zip -C C:/Users/runneradmin/gosdk + $env:sdkPath=(Get-ChildItem "C:/Users/runneradmin/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go") + echo $env:sdkPath + cd D:\a\private-amazon-cloudwatch-agent-staging\private-amazon-cloudwatch-agent-staging + ls + go mod edit -replace github.com/aws/aws-sdk-go=$env:sdkPath + + - name: Replace AWS SDK (Linux) + if: matrix.family != 'windows' + run: | + mkdir ~/gosdk + aws s3 cp s3://compass-pre-release/staging.zip ~ + unzip -q -d ~/gosdk ~/staging.zip || true + sdkPath=$(echo ~/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go) + echo $sdkPath + ls + go mod edit -replace github.com/aws/aws-sdk-go=$sdkPath + - name: Unit Test if: steps.cached_binaries.outputs.cache-hit != 'true' && needs.changes.outputs.build == 'true' run: make test From 1db25cba4260525c99ba5a1079c4e3603352a8ec Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Thu, 6 Jun 2024 10:41:20 -0400 Subject: [PATCH 19/55] Refactor ResourceMap to ResourceStore and fix unit tests (#709) --- internal/resourcemap/resourcemap.go | 67 ------- internal/resourcemap/resourcemap_test.go | 192 ------------------- internal/resourcestore/resourcestore.go | 68 +++++++ internal/resourcestore/resourcestore_test.go | 118 ++++++++++++ 4 files changed, 186 insertions(+), 259 deletions(-) delete mode 100644 internal/resourcemap/resourcemap.go delete mode 100644 internal/resourcemap/resourcemap_test.go create mode 100644 internal/resourcestore/resourcestore.go create mode 100644 internal/resourcestore/resourcestore_test.go diff --git a/internal/resourcemap/resourcemap.go b/internal/resourcemap/resourcemap.go deleted file mode 100644 index 81a019442e..0000000000 --- a/internal/resourcemap/resourcemap.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package resourcemap - -var resourceMap *ResourceMap - -type ec2Info struct { - InstanceID string - AutoScalingGroup string -} - -type ecsInfo struct { - ClusterName string -} - -type eksInfo struct { - ClusterName string -} - -type ResourceMap struct { - // mode should be EC2, ECS, EKS, and K8S - mode string - - // ec2Info stores information about EC2 instances such as instance ID and - // auto scaling groups - ec2Info ec2Info - - // ecsInfo stores information about ECS such as cluster name - // TODO: This struct may need to be expanded to include task role arn and more - ecsInfo ecsInfo - - // ekeInfo stores information about EKS such as cluster - // TODO: This struct may need to be expanded to include namespace, pod, node, etc - eksInfo eksInfo - - // This variable is reserved for communication between OTEL components and LogAgent - // in order to achieve process correlations - logFiles map[string]string -} - -func GetResourceMap() *ResourceMap { - if resourceMap == nil { - InitResourceMap() - } - return resourceMap -} - -func InitResourceMap() { - // Add logic to store attributes such as instance ID, cluster name, etc here -} - -func (r *ResourceMap) LogFiles() map[string]string { - return r.logFiles -} - -func (r *ResourceMap) EC2Info() ec2Info { - return r.ec2Info -} - -func (r *ResourceMap) ECSInfo() ecsInfo { - return r.ecsInfo -} - -func (r *ResourceMap) EKSInfo() eksInfo { - return r.eksInfo -} diff --git a/internal/resourcemap/resourcemap_test.go b/internal/resourcemap/resourcemap_test.go deleted file mode 100644 index 317ecb9b51..0000000000 --- a/internal/resourcemap/resourcemap_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package resourcemap - -import ( - "reflect" - "testing" -) - -func resetResourceMap() { - resourceMap = nil -} - -func TestGetResourceMap(t *testing.T) { - tests := []struct { - name string - want *ResourceMap - }{ - { - name: "happypath", - want: resourceMap, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetResourceMap(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("GetResourceMap() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestInitResourceMap(t *testing.T) { - tests := []struct { - name string - }{} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - InitResourceMap() - }) - } -} - -func TestResourceMap_EC2Info(t *testing.T) { - type fields struct { - mode string - ec2Info ec2Info - ecsInfo ecsInfo - eksInfo eksInfo - logFiles map[string]string - } - tests := []struct { - name string - fields fields - want ec2Info - }{ - { - name: "happypath", - fields: fields{ - ec2Info: ec2Info{InstanceID: "i-1234567890", AutoScalingGroup: "test-asg"}, - }, - want: ec2Info{ - InstanceID: "i-1234567890", - AutoScalingGroup: "test-asg", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &ResourceMap{ - mode: tt.fields.mode, - ec2Info: tt.fields.ec2Info, - ecsInfo: tt.fields.ecsInfo, - eksInfo: tt.fields.eksInfo, - logFiles: tt.fields.logFiles, - } - if got := r.EC2Info(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("EC2Info() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestResourceMap_ECSInfo(t *testing.T) { - type fields struct { - mode string - ec2Info ec2Info - ecsInfo ecsInfo - eksInfo eksInfo - logFiles map[string]string - } - tests := []struct { - name string - fields fields - want ecsInfo - }{ - { - name: "happypath", - fields: fields{ - ecsInfo: ecsInfo{ClusterName: "test-cluster"}, - }, - want: ecsInfo{ - ClusterName: "test-cluster", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - 
r := &ResourceMap{ - mode: tt.fields.mode, - ec2Info: tt.fields.ec2Info, - ecsInfo: tt.fields.ecsInfo, - eksInfo: tt.fields.eksInfo, - logFiles: tt.fields.logFiles, - } - if got := r.ECSInfo(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("ECSInfo() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestResourceMap_EKSInfo(t *testing.T) { - type fields struct { - mode string - ec2Info ec2Info - ecsInfo ecsInfo - eksInfo eksInfo - logFiles map[string]string - } - tests := []struct { - name string - fields fields - want eksInfo - }{ - { - name: "happypath", - fields: fields{ - eksInfo: eksInfo{ClusterName: "test-cluster"}, - }, - want: eksInfo{ - ClusterName: "test-cluster", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &ResourceMap{ - mode: tt.fields.mode, - ec2Info: tt.fields.ec2Info, - ecsInfo: tt.fields.ecsInfo, - eksInfo: tt.fields.eksInfo, - logFiles: tt.fields.logFiles, - } - if got := r.EKSInfo(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("EKSInfo() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestResourceMap_LogFiles(t *testing.T) { - type fields struct { - mode string - ec2Info ec2Info - ecsInfo ecsInfo - eksInfo eksInfo - logFiles map[string]string - } - tests := []struct { - name string - fields fields - want map[string]string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &ResourceMap{ - mode: tt.fields.mode, - ec2Info: tt.fields.ec2Info, - ecsInfo: tt.fields.ecsInfo, - eksInfo: tt.fields.eksInfo, - logFiles: tt.fields.logFiles, - } - if got := r.LogFiles(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("LogFiles() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go new file mode 100644 index 0000000000..724f89e4b9 --- /dev/null +++ b/internal/resourcestore/resourcestore.go @@ -0,0 +1,68 @@ +// Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import "sync" + +var ( + resourceStore *ResourceStore + once sync.Once +) + +type ec2Info struct { + InstanceID string + AutoScalingGroup string +} + +type eksInfo struct { + ClusterName string +} + +type ResourceStore struct { + // mode should be EC2, ECS, EKS, and K8S + mode string + + // ec2Info stores information about EC2 instances such as instance ID and + // auto scaling groups + ec2Info ec2Info + + // ekeInfo stores information about EKS such as cluster + // TODO: This struct may need to be expanded to include namespace, pod, node, etc + eksInfo eksInfo + + // logFiles is a variable reserved for communication between OTEL components and LogAgent + // in order to achieve process correlations where the key is the log file path and the value + // is the service name + // Example: + // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent" + logFiles map[string]string +} + +func GetResourceStore() *ResourceStore { + once.Do(func() { + resourceStore = initResourceStore() + }) + return resourceStore +} + +func initResourceStore() *ResourceStore { + // Add logic to store attributes such as instance ID, cluster name, etc here + return &ResourceStore{} +} + +func (r *ResourceStore) Mode() string { + return r.mode +} + +func (r *ResourceStore) EC2Info() ec2Info { + return r.ec2Info +} + +func (r *ResourceStore) EKSInfo() eksInfo { + return r.eksInfo +} + +func (r *ResourceStore) LogFiles() map[string]string { + return r.logFiles +} diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go new file mode 100644 index 0000000000..1d921fe884 --- /dev/null +++ b/internal/resourcestore/resourcestore_test.go @@ -0,0 +1,118 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "reflect" + "testing" +) + +func TestInitResourceStore(t *testing.T) { + tests := []struct { + name string + }{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + initResourceStore() + }) + } +} + +func TestResourceStore_EC2Info(t *testing.T) { + tests := []struct { + name string + ec2InfoInput ec2Info + want ec2Info + }{ + { + name: "happypath", + ec2InfoInput: ec2Info{ + InstanceID: "i-1234567890", + AutoScalingGroup: "test-asg", + }, + want: ec2Info{ + InstanceID: "i-1234567890", + AutoScalingGroup: "test-asg", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + ec2Info: tt.ec2InfoInput, + } + if got := r.EC2Info(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("EC2Info() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceStore_EKSInfo(t *testing.T) { + tests := []struct { + name string + eksInfoInput eksInfo + want eksInfo + }{ + { + name: "happypath", + eksInfoInput: eksInfo{ClusterName: "test-cluster"}, + want: eksInfo{ClusterName: "test-cluster"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + eksInfo: tt.eksInfoInput, + } + if got := r.EKSInfo(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("EKSInfo() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceStore_LogFiles(t *testing.T) { + tests := []struct { + name string + logFileInput map[string]string + want map[string]string + }{ + { + name: "happypath", + logFileInput: map[string]string{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent"}, + want: map[string]string{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + logFiles: tt.logFileInput, + } + if got := r.LogFiles(); !reflect.DeepEqual(got, tt.want) { + 
t.Errorf("LogFiles() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResourceStore_Mode(t *testing.T) { + tests := []struct { + name string + modeInput string + want string + }{ + {name: "happypath", modeInput: "EC2", want: "EC2"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + mode: tt.modeInput, + } + if got := r.Mode(); got != tt.want { + t.Errorf("Mode() = %v, want %v", got, tt.want) + } + }) + } +} From 5344a3dd714e12a7a073ae3b4e0781f7dafe992a Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:40:22 -0400 Subject: [PATCH 20/55] Add service provider to scrape service name from instance IAM role (#716) --- .../amazon-cloudwatch-agent.go | 3 + .../ec2metadataprovider.go | 54 ++++++------ .../ec2metadataprovider_test.go | 2 +- internal/resourcestore/resourcestore.go | 31 ++++++- internal/resourcestore/resourcestore_test.go | 27 ++++++ internal/resourcestore/serviceprovider.go | 65 ++++++++++++++ .../resourcestore/serviceprovider_test.go | 86 +++++++++++++++++++ plugins/processors/ec2tagger/ec2tagger.go | 5 +- .../processors/ec2tagger/ec2tagger_test.go | 4 + 9 files changed, 245 insertions(+), 32 deletions(-) rename {plugins/processors/ec2tagger => internal/ec2metadataprovider}/ec2metadataprovider.go (57%) rename {plugins/processors/ec2tagger => internal/ec2metadataprovider}/ec2metadataprovider_test.go (98%) create mode 100644 internal/resourcestore/serviceprovider.go create mode 100644 internal/resourcestore/serviceprovider_test.go diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index 2afac53173..9dcc40376d 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -37,6 +37,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" 
"github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" + "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/internal/version" cwaLogger "github.com/aws/amazon-cloudwatch-agent/logger" "github.com/aws/amazon-cloudwatch-agent/logs" @@ -355,6 +356,8 @@ func runAgent(ctx context.Context, e := []string{"--config=" + yamlConfigPath + " --feature-gates=exporter.xray.allowDot"} cmd.SetArgs(e) + resourcestore.GetResourceStore() + return cmd.Execute() } diff --git a/plugins/processors/ec2tagger/ec2metadataprovider.go b/internal/ec2metadataprovider/ec2metadataprovider.go similarity index 57% rename from plugins/processors/ec2tagger/ec2metadataprovider.go rename to internal/ec2metadataprovider/ec2metadataprovider.go index 6278f69dff..1546c505c6 100644 --- a/plugins/processors/ec2tagger/ec2metadataprovider.go +++ b/internal/ec2metadataprovider/ec2metadataprovider.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package ec2tagger +package ec2metadataprovider import ( "context" @@ -20,6 +20,7 @@ type MetadataProvider interface { Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) Hostname(ctx context.Context) (string, error) InstanceID(ctx context.Context) (string, error) + InstanceProfileIAMRole() (string, error) } type metadataClient struct { @@ -47,40 +48,41 @@ func NewMetadataProvider(p client.ConfigProvider, retries int) MetadataProvider } func (c *metadataClient) InstanceID(ctx context.Context) (string, error) { - instanceId, err := c.metadataFallbackDisabled.GetMetadataWithContext(ctx, "instance-id") - if err != nil { - log.Printf("D! 
could not get instance id without imds v1 fallback enable thus enable fallback") - instanceInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "instance-id") - if errorInner == nil { - agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) - } - return instanceInner, errorInner - } - return instanceId, err + return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (string, error) { + return metadataClient.GetMetadataWithContext(ctx, "instance-id") + }) } func (c *metadataClient) Hostname(ctx context.Context) (string, error) { - hostname, err := c.metadataFallbackDisabled.GetMetadataWithContext(ctx, "hostname") - if err != nil { - log.Printf("D! could not get hostname without imds v1 fallback enable thus enable fallback") - hostnameInner, errorInner := c.metadataFallbackEnabled.GetMetadataWithContext(ctx, "hostname") - if errorInner == nil { - agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) + return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (string, error) { + return metadataClient.GetMetadataWithContext(ctx, "hostname") + }) +} + +func (c *metadataClient) InstanceProfileIAMRole() (string, error) { + return withMetadataFallbackRetry(context.Background(), c, func(metadataClient *ec2metadata.EC2Metadata) (string, error) { + iamInfo, err := metadataClient.IAMInfo() + if err != nil { + return "", err } - return hostnameInner, errorInner - } - return hostname, err + return iamInfo.InstanceProfileArn, nil + }) } func (c *metadataClient) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { - instanceDocument, err := c.metadataFallbackDisabled.GetInstanceIdentityDocumentWithContext(ctx) + return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (ec2metadata.EC2InstanceIdentityDocument, error) { + return metadataClient.GetInstanceIdentityDocumentWithContext(ctx) + }) +} + +func withMetadataFallbackRetry[T any](ctx 
context.Context, c *metadataClient, operation func(*ec2metadata.EC2Metadata) (T, error)) (T, error) { + result, err := operation(c.metadataFallbackDisabled) if err != nil { - log.Printf("D! could not get instance document without imds v1 fallback enable thus enable fallback") - instanceDocumentInner, errorInner := c.metadataFallbackEnabled.GetInstanceIdentityDocumentWithContext(ctx) - if errorInner == nil { + log.Printf("D! could not perform operation without imds v1 fallback enable thus enable fallback") + result, err = operation(c.metadataFallbackEnabled) + if err == nil { agent.UsageFlags().Set(agent.FlagIMDSFallbackSuccess) } - return instanceDocumentInner, errorInner } - return instanceDocument, err + return result, err } diff --git a/plugins/processors/ec2tagger/ec2metadataprovider_test.go b/internal/ec2metadataprovider/ec2metadataprovider_test.go similarity index 98% rename from plugins/processors/ec2tagger/ec2metadataprovider_test.go rename to internal/ec2metadataprovider/ec2metadataprovider_test.go index 619b7d18e4..5252ce6c1d 100644 --- a/plugins/processors/ec2tagger/ec2metadataprovider_test.go +++ b/internal/ec2metadataprovider/ec2metadataprovider_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package ec2tagger +package ec2metadataprovider import ( "context" diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 724f89e4b9..08a9d508e7 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -3,13 +3,25 @@ package resourcestore -import "sync" +import ( + "sync" + + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/internal/retryer" +) var ( resourceStore *ResourceStore once sync.Once ) +type ServiceNameProvider interface { + startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) + ServiceName() + getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) +} + type ec2Info struct { InstanceID string AutoScalingGroup string @@ -28,9 +40,12 @@ type ResourceStore struct { ec2Info ec2Info // ekeInfo stores information about EKS such as cluster - // TODO: This struct may need to be expanded to include namespace, pod, node, etc eksInfo eksInfo + // serviceprovider stores information about possible service names + // that we can attach to the resource ID + serviceprovider serviceprovider + // logFiles is a variable reserved for communication between OTEL components and LogAgent // in order to achieve process correlations where the key is the log file path and the value // is the service name @@ -48,7 +63,12 @@ func GetResourceStore() *ResourceStore { func initResourceStore() *ResourceStore { // Add logic to store attributes such as instance ID, cluster name, etc here - return &ResourceStore{} + metadataProvider := getMetaDataProvider() + serviceInfo := newServiceProvider() + go serviceInfo.startServiceProvider(metadataProvider) + return &ResourceStore{ + serviceprovider: *serviceInfo, + } } func (r *ResourceStore) Mode() string { @@ -66,3 +86,8 @@ func (r *ResourceStore) EKSInfo() eksInfo { 
func (r *ResourceStore) LogFiles() map[string]string { return r.logFiles } + +func getMetaDataProvider() ec2metadataprovider.MetadataProvider { + mdCredentialConfig := &configaws.CredentialConfig{} + return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) +} diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index 1d921fe884..488623f782 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -4,10 +4,37 @@ package resourcestore import ( + "context" + "errors" "reflect" "testing" + + "github.com/aws/aws-sdk-go/aws/ec2metadata" ) +type mockMetadataProvider struct { + InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument +} + +func (m *mockMetadataProvider) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { + if m.InstanceIdentityDocument != nil { + return *m.InstanceIdentityDocument, nil + } + return ec2metadata.EC2InstanceIdentityDocument{}, errors.New("No instance identity document") +} + +func (m *mockMetadataProvider) Hostname(ctx context.Context) (string, error) { + return "MockHostName", nil +} + +func (m *mockMetadataProvider) InstanceID(ctx context.Context) (string, error) { + return "MockInstanceID", nil +} + +func (m *mockMetadataProvider) InstanceProfileIAMRole() (string, error) { + return "arn:aws:iam::123456789:instance-profile/TestRole", nil +} + func TestInitResourceStore(t *testing.T) { tests := []struct { name string diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go new file mode 100644 index 0000000000..a7d4fefaa6 --- /dev/null +++ b/internal/resourcestore/serviceprovider.go @@ -0,0 +1,65 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" + + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" +) + +const ( + INSTANCE_PROFILE = "instance-profile/" +) + +type serviceprovider struct { + iamRole string +} + +func (s *serviceprovider) startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) error { + err := s.getIAMRole(metadataProvider) + if err != nil { + log.Println("D! Failed to get IAM role through service provider") + return err + } + return nil +} + +// ServiceName function gets the relevant service name based +// on the following priority chain +// 1. Incoming telemetry attributes +// 2. CWA config +// 3. Process correlation +// 4. instance tags +// 5. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) +func (s *serviceprovider) ServiceName() string { + return s.iamRole +} + +func (s *serviceprovider) getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) error { + iamRole, err := metadataProvider.InstanceProfileIAMRole() + if err != nil { + log.Println("D! resourceMap: Unable to retrieve EC2 Metadata. This feature must only be used on an EC2 instance.") + return err + } + iamRoleArn, err := arn.Parse(iamRole) + if err != nil { + log.Println("D! resourceMap: Unable to parse IAM Role Arn. " + err.Error()) + } + iamRoleResource := iamRoleArn.Resource + if strings.HasPrefix(iamRoleResource, INSTANCE_PROFILE) { + roleName := strings.TrimPrefix(iamRoleResource, INSTANCE_PROFILE) + s.iamRole = roleName + } else { + log.Println("D! resourceMap: IAM Role resource does not follow the expected pattern. 
Should be instance-profile/") + } + return nil +} + +func newServiceProvider() *serviceprovider { + return &serviceprovider{} +} diff --git a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go new file mode 100644 index 0000000000..894a6dacae --- /dev/null +++ b/internal/resourcestore/serviceprovider_test.go @@ -0,0 +1,86 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" +) + +func Test_serviceprovider_initServiceProvider(t *testing.T) { + type args struct { + metadataProvider ec2metadataprovider.MetadataProvider + } + tests := []struct { + name string + args args + wantIAM string + }{ + { + name: "HappyPath_IAMRole", + args: args{ + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, + }, + wantIAM: "TestRole", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceprovider{} + s.startServiceProvider(tt.args.metadataProvider) + assert.Equal(t, tt.wantIAM, s.iamRole) + }) + } +} + +func Test_serviceprovider_ServiceName(t *testing.T) { + type fields struct { + iamRole string + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "HappyPath_IAMServiceName", + fields: fields{ + iamRole: "MockIAM", + }, + want: "MockIAM", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceprovider{ + iamRole: tt.fields.iamRole, + } + assert.Equal(t, tt.want, s.ServiceName()) + }) + } +} + +func Test_serviceprovider_getIAMRole(t *testing.T) { + tests := []struct { + name string + metadataProvider ec2metadataprovider.MetadataProvider + want string + }{ + { + name: "Happypath_MockMetadata", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, + want: "TestRole", + }, + } + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceprovider{} + s.getIAMRole(tt.metadataProvider) + assert.Equal(t, tt.want, s.iamRole) + }) + } +} diff --git a/plugins/processors/ec2tagger/ec2tagger.go b/plugins/processors/ec2tagger/ec2tagger.go index d4236397d5..07ccb0bb82 100644 --- a/plugins/processors/ec2tagger/ec2tagger.go +++ b/plugins/processors/ec2tagger/ec2tagger.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger/internal/volume" translatorCtx "github.com/aws/amazon-cloudwatch-agent/translator/context" ) @@ -43,7 +44,7 @@ type Tagger struct { logger *zap.Logger cancelFunc context.CancelFunc - metadataProvider MetadataProvider + metadataProvider ec2metadataprovider.MetadataProvider ec2Provider ec2ProviderType shutdownC chan bool @@ -67,7 +68,7 @@ func newTagger(config *Config, logger *zap.Logger) *Tagger { Config: config, logger: logger, cancelFunc: cancel, - metadataProvider: NewMetadataProvider(mdCredentialConfig.Credentials(), config.IMDSRetries), + metadataProvider: ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), config.IMDSRetries), ec2Provider: func(ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { return ec2.New( ec2CredentialConfig.Credentials(), diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index 0f3d3d84c8..2472fcd5d8 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -144,6 +144,10 @@ func (m *mockMetadataProvider) InstanceID(ctx context.Context) (string, error) { return "MockInstanceID", nil } +func (m *mockMetadataProvider) InstanceProfileIAMRole() (string, error) { + return "MockIAM", nil +} + var mockedInstanceIdentityDoc = 
&ec2metadata.EC2InstanceIdentityDocument{ InstanceID: "i-01d2417c27a396e44", Region: "us-east-1", From 87801a92a9b1d849432eb680a6700e2b37d900ff Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Wed, 12 Jun 2024 09:53:01 -0400 Subject: [PATCH 21/55] added logic to get Instance Id and ASG in resourcestore (#713) --- internal/resourcestore/ec2Info.go | 144 ++++++++++++++++++ internal/resourcestore/ec2Info_test.go | 123 +++++++++++++++ internal/resourcestore/resourcestore.go | 46 ++++-- plugins/processors/ec2tagger/constants.go | 4 +- plugins/processors/ec2tagger/ec2tagger.go | 12 +- .../processors/ec2tagger/ec2tagger_test.go | 12 +- 6 files changed, 318 insertions(+), 23 deletions(-) create mode 100644 internal/resourcestore/ec2Info.go create mode 100644 internal/resourcestore/ec2Info_test.go diff --git a/internal/resourcestore/ec2Info.go b/internal/resourcestore/ec2Info.go new file mode 100644 index 0000000000..213332ccb4 --- /dev/null +++ b/internal/resourcestore/ec2Info.go @@ -0,0 +1,144 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" +) + +type ec2Info struct { + InstanceID string + AutoScalingGroup string + + // region is used while making call to describeTags Ec2 API for AutoScalingGroup + Region string + + metadataProvider ec2metadataprovider.MetadataProvider + credentialCfg *configaws.CredentialConfig + shutdownC chan bool +} + +func (ei *ec2Info) initEc2Info() { + log.Println("I! 
ec2Info: Initializing ec2Info") + ei.shutdownC = make(chan bool) + if err := ei.setInstanceIdAndRegion(); err != nil { + return + } + ec2CredentialConfig := ei.credentialCfg + ec2CredentialConfig.Region = ei.Region + if err := ei.setAutoScalingGroup(ec2Provider(ec2CredentialConfig)); err != nil { + return + } + log.Printf("I! ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) + ei.Shutdown() +} + +func (ei *ec2Info) setInstanceIdAndRegion() error { + for { + metadataDoc, err := ei.metadataProvider.Get(context.Background()) + if err != nil { + log.Printf("E! ec2Info: Failed to get Instance Id and region through metadata provider: %v", err) + wait := time.NewTimer(1 * time.Minute) + select { + case <-ei.shutdownC: + wait.Stop() + return errors.New("ec2Info: shutdownC received") + case <-wait.C: + } + } else { + ei.InstanceID = metadataDoc.InstanceID + ei.Region = metadataDoc.Region + log.Printf("I! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) + return nil + } + } +} + +func (ei *ec2Info) setAutoScalingGroup(ec2API ec2iface.EC2API) error { + retry := 0 + for { + var waitDuration time.Duration + if retry < len(ec2tagger.BackoffSleepArray) { + waitDuration = ec2tagger.BackoffSleepArray[retry] + } else { + waitDuration = ec2tagger.BackoffSleepArray[len(ec2tagger.BackoffSleepArray)-1] + } + + wait := time.NewTimer(waitDuration) + select { + case <-ei.shutdownC: + wait.Stop() + return errors.New("ec2Info: shutdownC received") + case <-wait.C: + } + + if retry > 0 { + log.Printf("D! ec2Info: initial retrieval of tags and volumes with retry: %d", retry) + } + + if err := ei.retrieveAsgName(ec2API); err != nil { + log.Printf("E! ec2Info: Unable to describe ec2 tags for retry %d with error %v", retry, err) + } else { + log.Println("I! 
ec2Info: Retrieval of tags succeeded") + return nil + } + + retry++ + } + +} + +func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { + tagFilters := []*ec2.Filter{ + { + Name: aws.String("resource-type"), + Values: aws.StringSlice([]string{"instance"}), + }, + { + Name: aws.String("resource-id"), + Values: aws.StringSlice([]string{ei.InstanceID}), + }, + { + Name: aws.String("key"), + Values: aws.StringSlice([]string{ec2tagger.Ec2InstanceTagKeyASG}), + }, + } + input := &ec2.DescribeTagsInput{ + Filters: tagFilters, + } + for { + result, err := ec2API.DescribeTags(input) + if err != nil { + log.Println("E! ec2Info: Unable to retrieve EC2 AutoScalingGroup. This feature must only be used on an EC2 instance.") + return err + } + for _, tag := range result.Tags { + key := *tag.Key + if ec2tagger.Ec2InstanceTagKeyASG == key { + ei.AutoScalingGroup = *tag.Value + return nil + } + } + if result.NextToken == nil { + break + } + input.SetNextToken(*result.NextToken) + } + return nil +} + +func (ei *ec2Info) Shutdown() { + close(ei.shutdownC) +} diff --git a/internal/resourcestore/ec2Info_test.go b/internal/resourcestore/ec2Info_test.go new file mode 100644 index 0000000000..898b27a1da --- /dev/null +++ b/internal/resourcestore/ec2Info_test.go @@ -0,0 +1,123 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" +) + +var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-01d2417c27a396e44", + Region: "us-east-1", + InstanceType: "m5ad.large", + ImageID: "ami-09edd32d9b0990d49", +} + +type mockEC2Client struct { + ec2iface.EC2API +} + +// construct the return results for the mocked DescribeTags api +var ( + tagKey1 = "tagKey1" + tagVal1 = "tagVal1" + tagDes1 = ec2.TagDescription{Key: &tagKey1, Value: &tagVal1} +) + +var ( + tagKey2 = "tagKey2" + tagVal2 = "tagVal2" + tagDes2 = ec2.TagDescription{Key: &tagKey2, Value: &tagVal2} +) + +var ( + tagKey3 = "aws:autoscaling:groupName" + tagVal3 = "ASG-1" + tagDes3 = ec2.TagDescription{Key: &tagKey3, Value: &tagVal3} +) + +func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { + //all tags are returned when the ec2 metadata service knows about all tags + allTags := ec2.DescribeTagsOutput{ + NextToken: nil, + Tags: []*ec2.TagDescription{&tagDes1, &tagDes2, &tagDes3}, + } + + return &allTags, nil +} + +func TestSetInstanceIdAndRegion(t *testing.T) { + type args struct { + metadataProvider ec2metadataprovider.MetadataProvider + } + tests := []struct { + name string + args args + wantErr bool + want ec2Info + }{ + { + name: "happy path", + args: args{ + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, + }, + wantErr: false, + want: ec2Info{ + InstanceID: mockedInstanceIdentityDoc.InstanceID, + Region: mockedInstanceIdentityDoc.Region, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ei := &ec2Info{ + metadataProvider: tt.args.metadataProvider, + } + if 
err := ei.setInstanceIdAndRegion(); (err != nil) != tt.wantErr { + t.Errorf("setInstanceIdAndRegion() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.want.InstanceID, ei.InstanceID) + assert.Equal(t, tt.want.Region, ei.Region) + }) + } +} + +func TestRetrieveASGName(t *testing.T) { + type args struct { + ec2Client ec2iface.EC2API + } + tests := []struct { + name string + args args + wantErr bool + want ec2Info + }{ + { + name: "happy path", + args: args{ + ec2Client: &mockEC2Client{}, + }, + wantErr: false, + want: ec2Info{ + AutoScalingGroup: tagVal3, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ei := &ec2Info{} + if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { + t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) + }) + } +} diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 08a9d508e7..13929ba5f7 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -4,11 +4,18 @@ package resourcestore import ( + "log" "sync" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + translatorCtx "github.com/aws/amazon-cloudwatch-agent/translator/context" ) var ( @@ -22,11 +29,6 @@ type ServiceNameProvider interface { getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) } -type ec2Info struct { - InstanceID string - AutoScalingGroup string -} - type eksInfo struct { ClusterName string } @@ -63,12 +65,29 @@ func GetResourceStore() *ResourceStore { func initResourceStore() 
*ResourceStore { // Add logic to store attributes such as instance ID, cluster name, etc here + rs := &ResourceStore{} metadataProvider := getMetaDataProvider() - serviceInfo := newServiceProvider() - go serviceInfo.startServiceProvider(metadataProvider) - return &ResourceStore{ - serviceprovider: *serviceInfo, + if translatorCtx.CurrentContext().Mode() != "" { + rs.mode = translatorCtx.CurrentContext().Mode() + log.Printf("I! resourcestore: ResourceStore mode is %s ", rs.mode) } + switch rs.mode { + case config.ModeEC2: + rs.ec2Info = ec2Info{ + metadataProvider: metadataProvider, + credentialCfg: &configaws.CredentialConfig{}, + } + go rs.ec2Info.initEc2Info() + } + serviceInfo := newServiceProvider() + go func() { + err := serviceInfo.startServiceProvider(metadataProvider) + if err != nil { + log.Printf("E! resourcestore: Failed to start service provider: %v", err) + } + }() + rs.serviceprovider = *serviceInfo + return rs } func (r *ResourceStore) Mode() string { @@ -91,3 +110,12 @@ func getMetaDataProvider() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) } + +func ec2Provider(ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { + return ec2.New( + ec2CredentialConfig.Credentials(), + &aws.Config{ + LogLevel: configaws.SDKLogLevel(), + Logger: configaws.SDKLogger{}, + }) +} diff --git a/plugins/processors/ec2tagger/constants.go b/plugins/processors/ec2tagger/constants.go index 782ff303cd..b99054e863 100644 --- a/plugins/processors/ec2tagger/constants.go +++ b/plugins/processors/ec2tagger/constants.go @@ -60,7 +60,7 @@ const sampleConfig = ` ` const ( - ec2InstanceTagKeyASG = "aws:autoscaling:groupName" + Ec2InstanceTagKeyASG = "aws:autoscaling:groupName" cwDimensionASG = "AutoScalingGroupName" mdKeyInstanceId = "InstanceId" mdKeyImageId = "ImageId" @@ -70,5 +70,5 @@ const ( var ( // issue with 
newer versions of the sdk take longer when hop limit is 1 in eks defaultRefreshInterval = 180 * time.Second - backoffSleepArray = []time.Duration{0, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} // backoff retry for ec2 describe instances API call. Assuming the throttle limit is 20 per second. 10 mins allow 12000 API calls. + BackoffSleepArray = []time.Duration{0, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} // backoff retry for ec2 describe instances API call. Assuming the throttle limit is 20 per second. 10 mins allow 12000 API calls. ) diff --git a/plugins/processors/ec2tagger/ec2tagger.go b/plugins/processors/ec2tagger/ec2tagger.go index 07ccb0bb82..52e9423100 100644 --- a/plugins/processors/ec2tagger/ec2tagger.go +++ b/plugins/processors/ec2tagger/ec2tagger.go @@ -173,7 +173,7 @@ func (t *Tagger) updateTags() error { } for _, tag := range result.Tags { key := *tag.Key - if ec2InstanceTagKeyASG == key { + if Ec2InstanceTagKeyASG == key { // rename to match CW dimension as applied by AutoScaling service, not the EC2 tag key = cwDimensionASG } @@ -247,7 +247,7 @@ func (t *Tagger) ec2TagsRetrieved() bool { defer t.RUnlock() if t.ec2TagCache != nil { for _, key := range t.EC2InstanceTagKeys { - if key == ec2InstanceTagKeyASG { + if key == Ec2InstanceTagKeyASG { key = cwDimensionASG } if key == "*" { @@ -306,7 +306,7 @@ func (t *Tagger) Start(ctx context.Context, _ component.Host) error { // and filter for the EC2 tag name called 'aws:autoscaling:groupName' for i, key := range t.EC2InstanceTagKeys { if cwDimensionASG == key { - t.EC2InstanceTagKeys[i] = ec2InstanceTagKeyASG + t.EC2InstanceTagKeys[i] = Ec2InstanceTagKeyASG } } @@ -443,10 +443,10 @@ func (t *Tagger) initialRetrievalOfTagsAndVolumes() { retry := 0 for { var waitDuration time.Duration - if retry < len(backoffSleepArray) { - waitDuration = backoffSleepArray[retry] + if retry < 
len(BackoffSleepArray) { + waitDuration = BackoffSleepArray[retry] } else { - waitDuration = backoffSleepArray[len(backoffSleepArray)-1] + waitDuration = BackoffSleepArray[len(BackoffSleepArray)-1] } wait := time.NewTimer(waitDuration) diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index 2472fcd5d8..f7baba1dd5 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -290,7 +290,7 @@ func TestStartSuccessWithNoTagsVolumesUpdate(t *testing.T) { } volumeCache := &mockVolumeCache{cache: make(map[string]string)} - backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} + BackoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, @@ -333,7 +333,7 @@ func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { return ec2Client } volumeCache := &mockVolumeCache{cache: make(map[string]string)} - backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} + BackoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 10 * time.Millisecond tagger := &Tagger{ @@ -389,7 +389,7 @@ func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { return ec2Client } volumeCache := &mockVolumeCache{cache: make(map[string]string)} - backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} + BackoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, @@ -434,7 +434,7 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { return ec2Client } volumeCache := &mockVolumeCache{cache: make(map[string]string)} - 
backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} + BackoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, @@ -527,7 +527,7 @@ func TestMetricsDroppedBeforeStarted(t *testing.T) { return ec2Client } volumeCache := &mockVolumeCache{cache: make(map[string]string)} - backoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} + BackoffSleepArray = []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, @@ -591,7 +591,7 @@ func TestTaggerStartDoesNotBlock(t *testing.T) { ec2Provider := func(*configaws.CredentialConfig) ec2iface.EC2API { return ec2Client } - backoffSleepArray = []time.Duration{1 * time.Minute, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} + BackoffSleepArray = []time.Duration{1 * time.Minute, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} defaultRefreshInterval = 180 * time.Second tagger := &Tagger{ Config: cfg, From 8fab947f9d08031845ff6d776db0b9547b3d7dd2 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 12 Jun 2024 16:18:16 -0400 Subject: [PATCH 22/55] Add tailersrc pointer to pusher and create RID from resourcestore (#719) --- internal/resourcestore/resourcestore.go | 17 ++++++++ logs/logs.go | 6 ++- plugins/inputs/logfile/logfile.go | 1 + plugins/inputs/logfile/tailersrc.go | 10 ++++- plugins/inputs/logfile/tailersrc_test.go | 3 ++ .../wineventlog/wineventlog.go | 5 +++ .../outputs/cloudwatchlogs/cloudwatchlogs.go | 10 ++--- .../cloudwatchlogs/cloudwatchlogs_test.go | 16 ++++++-- plugins/outputs/cloudwatchlogs/pusher.go | 10 ++++- 
plugins/outputs/cloudwatchlogs/pusher_test.go | 39 ++++++++++++++++++- 10 files changed, 102 insertions(+), 15 deletions(-) diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 13929ba5f7..761e806993 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" @@ -106,6 +107,22 @@ func (r *ResourceStore) LogFiles() map[string]string { return r.logFiles } +func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { + return &cloudwatchlogs.Resource{ + AttributeMaps: []map[string]*string{ + { + "PlatformType": aws.String("AWS::EC2"), + "EC2.InstanceId": aws.String("i-123456789"), + "EC2.AutoScalingGroup": aws.String("test-group"), + }, + }, + KeyAttributes: &cloudwatchlogs.KeyAttributes{ + Name: aws.String("myService"), + Environment: aws.String("myEnvironment"), + }, + } +} + func getMetaDataProvider() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) diff --git a/logs/logs.go b/logs/logs.go index 9e5cdbedcf..fcdd031269 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -9,6 +9,7 @@ import ( "log" "time" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -39,13 +40,14 @@ type LogSrc interface { Description() string Retention() int Class() string + ResourceID() *cloudwatchlogs.Resource Stop() } // A LogBackend is able to return a LogDest of a given name. // The same name should always return the same LogDest. 
type LogBackend interface { - CreateDest(string, string, int, string) LogDest + CreateDest(string, string, int, string, LogSrc) LogDest } // A LogDest represents a final endpoint where log events are published to. @@ -127,7 +129,7 @@ func (l *LogAgent) Run(ctx context.Context) { continue } retention = l.checkRetentionAlreadyAttempted(retention, logGroup) - dest := backend.CreateDest(logGroup, logStream, retention, logGroupClass) + dest := backend.CreateDest(logGroup, logStream, retention, logGroupClass, src) l.destNames[dest] = dname log.Printf("I! [logagent] piping log from %s/%s(%s) to %s with retention %d", logGroup, logStream, description, dname, retention) go l.runSrcToDest(src, dest) diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index f253262812..7794b57a4f 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -234,6 +234,7 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { t.Destination, t.getStateFilePath(filename), fileconfig.LogGroupClass, + fileconfig.FilePath, tailer, fileconfig.AutoRemoval, mlCheck, diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 67dae23a8f..b6e1126737 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -11,8 +11,10 @@ import ( "sync" "time" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/text/encoding" + "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" ) @@ -60,6 +62,7 @@ type tailerSrc struct { group string stream string class string + fileGlobPath string destination string stateFilePath string tailer *tail.Tail @@ -83,7 +86,7 @@ type tailerSrc struct { var _ logs.LogSrc = (*tailerSrc)(nil) func NewTailerSrc( - group, stream, destination, stateFilePath, logClass string, + group, stream, destination, stateFilePath, logClass, 
fileGlobPath string, tailer *tail.Tail, autoRemoval bool, isMultilineStartFn func(string) bool, @@ -100,6 +103,7 @@ func NewTailerSrc( destination: destination, stateFilePath: stateFilePath, class: logClass, + fileGlobPath: fileGlobPath, tailer: tailer, autoRemoval: autoRemoval, isMLStart: isMultilineStartFn, @@ -166,6 +170,10 @@ func (ts *tailerSrc) AddCleanUpFn(f func()) { ts.cleanUpFns = append(ts.cleanUpFns, f) } +func (ts *tailerSrc) ResourceID() *cloudwatchlogs.Resource { + return resourcestore.GetResourceStore().CreateLogFileRID(ts.fileGlobPath, ts.tailer.Filename) +} + func (ts *tailerSrc) runTail() { defer ts.cleanUp() t := time.NewTicker(multilineWaitPeriod) diff --git a/plugins/inputs/logfile/tailersrc_test.go b/plugins/inputs/logfile/tailersrc_test.go index 24f2d4a510..23a8ae8ba4 100644 --- a/plugins/inputs/logfile/tailersrc_test.go +++ b/plugins/inputs/logfile/tailersrc_test.go @@ -62,6 +62,7 @@ func TestTailerSrc(t *testing.T) { "groupName", "streamName", "destination", statefile.Name(), util.InfrequentAccessLogGroupClass, + "tailsrctest-*.log", tailer, false, // AutoRemoval regexp.MustCompile("^[\\S]").MatchString, @@ -173,6 +174,7 @@ func TestOffsetDoneCallBack(t *testing.T) { "destination", statefile.Name(), util.InfrequentAccessLogGroupClass, + "tailsrctest-*.log", tailer, false, // AutoRemoval regexp.MustCompile("^[\\S]").MatchString, @@ -391,6 +393,7 @@ func setupTailer(t *testing.T, multiLineFn func(string) bool, maxEventSize int) t.Name(), "destination", util.InfrequentAccessLogGroupClass, + "tailsrctest-*.log", statefile.Name(), tailer, false, // AutoRemoval diff --git a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go index a55a1eeb55..0fa149590d 100644 --- a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go +++ b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go @@ -17,6 +17,7 @@ import ( "syscall" "time" + 
"github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/sys/windows" "github.com/aws/amazon-cloudwatch-agent/logs" @@ -110,6 +111,10 @@ func (w *windowsEventLog) Stop() { close(w.done) } +func (w *windowsEventLog) ResourceID() *cloudwatchlogs.Resource { + return nil +} + func (w *windowsEventLog) run() { ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 7cd89fb4b1..d7923e6e6f 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -104,7 +104,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { return nil } -func (c *CloudWatchLogs) CreateDest(group, stream string, retention int, logGroupClass string) logs.LogDest { +func (c *CloudWatchLogs) CreateDest(group, stream string, retention int, logGroupClass string, logSrc logs.LogSrc) logs.LogDest { if group == "" { group = c.LogGroupName } @@ -121,10 +121,10 @@ func (c *CloudWatchLogs) CreateDest(group, stream string, retention int, logGrou Retention: retention, Class: logGroupClass, } - return c.getDest(t) + return c.getDest(t, logSrc) } -func (c *CloudWatchLogs) getDest(t Target) *cwDest { +func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { if cwd, ok := c.cwDests[t]; ok { return cwd } @@ -162,7 +162,7 @@ func (c *CloudWatchLogs) getDest(t Target) *cwDest { c.Log.Info("Configured middleware on AWS client") } } - pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup) + pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, logSrc) cwd := &cwDest{pusher: pusher, retryer: logThrottleRetryer} c.cwDests[t] = cwd return cwd @@ -173,7 +173,7 @@ func (c *CloudWatchLogs) writeMetricAsStructuredLog(m telegraf.Metric) { if err != nil { 
c.Log.Errorf("Failed to find target: %v", err) } - cwd := c.getDest(t) + cwd := c.getDest(t, nil) if cwd == nil { c.Log.Warnf("unable to find log destination, group: %v, stream: %v", t.Group, t.Stream) return diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go index ad794ac88b..2b4cdf5290 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/tool/util" ) @@ -20,39 +21,47 @@ func TestCreateDestination(t *testing.T) { cfgLogStream string cfgLogRetention int cfgLogClass string + cfgTailerSrc logs.LogSrc expectedLogGroup string expectedLogStream string expectedLogGroupRetention int expectedLogClass string + expectedTailerSrc logs.LogSrc }{ "WithTomlGroupStream": { cfgLogGroup: "", cfgLogStream: "", cfgLogRetention: -1, cfgLogClass: "", + cfgTailerSrc: nil, expectedLogGroup: "G1", expectedLogStream: "S1", expectedLogGroupRetention: -1, + expectedTailerSrc: nil, }, "WithOverrideGroupStreamStandardLogGroup": { cfgLogGroup: "", cfgLogStream: "", cfgLogRetention: -1, cfgLogClass: util.StandardLogGroupClass, + cfgTailerSrc: nil, expectedLogGroup: "G1", expectedLogStream: "S1", expectedLogGroupRetention: -1, expectedLogClass: util.StandardLogGroupClass, + expectedTailerSrc: nil, }, "WithOverrideGroupStreamInfrequentLogGroup": { cfgLogGroup: "Group5", cfgLogStream: "Stream5", cfgLogRetention: -1, cfgLogClass: util.InfrequentAccessLogGroupClass, + cfgTailerSrc: nil, expectedLogGroup: "Group5", expectedLogStream: "Stream5", expectedLogGroupRetention: -1, expectedLogClass: util.InfrequentAccessLogGroupClass, + expectedTailerSrc: nil, }, } @@ -66,11 +75,12 @@ func TestCreateDestination(t *testing.T) { pusherStopChan: make(chan struct{}), cwDests: make(map[Target]*cwDest), } - dest := 
c.CreateDest(testCase.cfgLogGroup, testCase.cfgLogStream, testCase.cfgLogRetention, testCase.cfgLogClass).(*cwDest) + dest := c.CreateDest(testCase.cfgLogGroup, testCase.cfgLogStream, testCase.cfgLogRetention, testCase.cfgLogClass, testCase.cfgTailerSrc).(*cwDest) require.Equal(t, testCase.expectedLogGroup, dest.pusher.Group) require.Equal(t, testCase.expectedLogStream, dest.pusher.Stream) require.Equal(t, testCase.expectedLogGroupRetention, dest.pusher.Retention) require.Equal(t, testCase.expectedLogClass, dest.pusher.Class) + require.Equal(t, testCase.expectedTailerSrc, dest.pusher.logSrc) }) } } @@ -83,8 +93,8 @@ func TestDuplicateDestination(t *testing.T) { pusherStopChan: make(chan struct{}), } // Given the same log group, log stream, same retention, and logClass - d1 := c.CreateDest("FILENAME", "", -1, util.InfrequentAccessLogGroupClass) - d2 := c.CreateDest("FILENAME", "", -1, util.InfrequentAccessLogGroupClass) + d1 := c.CreateDest("FILENAME", "", -1, util.InfrequentAccessLogGroupClass, nil) + d2 := c.CreateDest("FILENAME", "", -1, util.InfrequentAccessLogGroupClass, nil) // Then the destination for cloudwatchlogs endpoint would be the same require.Equal(t, d1, d2) diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 50c5b4ab71..8b4864c265 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -43,6 +43,7 @@ type pusher struct { RetryDuration time.Duration Log telegraf.Logger + logSrc logs.LogSrc events []*cloudwatchlogs.InputLogEvent minT, maxT *time.Time doneCallbacks []func() @@ -63,13 +64,14 @@ type pusher struct { wg *sync.WaitGroup } -func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup) *pusher { +func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger 
telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, logSrc logs.LogSrc) *pusher { p := &pusher{ Target: target, Service: service, FlushTimeout: flushTimeout, RetryDuration: retryDuration, Log: logger, + logSrc: logSrc, events: make([]*cloudwatchlogs.InputLogEvent, 0, 10), eventsCh: make(chan logs.LogEvent, 100), flushTimer: time.NewTimer(flushTimeout), @@ -218,12 +220,16 @@ func (p *pusher) send() { if p.needSort { sort.Stable(ByTimestamp(p.events)) } - + var resourceID *cloudwatchlogs.Resource + if p.logSrc != nil { + resourceID = p.logSrc.ResourceID() + } input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: p.events, LogGroupName: &p.Group, LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, + Resource: resourceID, } startTime := time.Now() diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index d3927a9134..482f5a6e7c 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -21,9 +21,30 @@ import ( "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/require" + "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/tool/util" ) +type mockLogSrc struct { + logs.LogSrc +} + +func (m *mockLogSrc) ResourceID() *cloudwatchlogs.Resource { + return &cloudwatchlogs.Resource{ + AttributeMaps: []map[string]*string{ + { + "PlatformType": aws.String("AWS::EC2"), + "EC2.InstanceId": aws.String("i-123456789"), + "EC2.AutoScalingGroup": aws.String("test-group"), + }, + }, + KeyAttributes: &cloudwatchlogs.KeyAttributes{ + Name: aws.String("myService"), + Environment: aws.String("myEnvironment"), + }, + } +} + var wg sync.WaitGroup type svcMock struct { @@ -88,6 +109,19 @@ func TestAddSingleEvent(t *testing.T) { var s svcMock called := false nst := "NEXT_SEQ_TOKEN" + expectedResourceID := &cloudwatchlogs.Resource{ + AttributeMaps: []map[string]*string{ + { + "PlatformType": aws.String("AWS::EC2"), + 
"EC2.InstanceId": aws.String("i-123456789"), + "EC2.AutoScalingGroup": aws.String("test-group"), + }, + }, + KeyAttributes: &cloudwatchlogs.KeyAttributes{ + Name: aws.String("myService"), + Environment: aws.String("myEnvironment"), + }, + } s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { called = true @@ -103,7 +137,7 @@ func TestAddSingleEvent(t *testing.T) { if len(in.LogEvents) != 1 || *in.LogEvents[0].Message != "MSG" { t.Errorf("PutLogEvents called with incorrect message, got: '%v'", *in.LogEvents[0].Message) } - + require.Equal(t, expectedResourceID, in.Resource) return &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: &nst, }, nil @@ -765,6 +799,7 @@ func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { func testPreparation(retention int, s *svcMock, flushTimeout time.Duration, retryDuration time.Duration) (chan struct{}, *pusher) { stop := make(chan struct{}) - p := NewPusher(Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg) + mockLogSrcObj := &mockLogSrc{} + p := NewPusher(Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, mockLogSrcObj) return stop, p } From 43afe53c03e3b76376a33d4d450dfd48615b26fe Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 14 Jun 2024 10:31:49 -0400 Subject: [PATCH 23/55] Get service name from EC2 tags by checking for allowlisted keys (#718) --- internal/resourcestore/ec2Info.go | 20 ++- internal/resourcestore/resourcestore.go | 36 ++-- internal/resourcestore/resourcestore_test.go | 27 +++ internal/resourcestore/serviceprovider.go | 114 +++++++++++-- .../resourcestore/serviceprovider_test.go | 156 ++++++++++++++++-- 5 files changed, 308 insertions(+), 45 deletions(-) diff --git a/internal/resourcestore/ec2Info.go 
b/internal/resourcestore/ec2Info.go index 213332ccb4..38043c3fba 100644 --- a/internal/resourcestore/ec2Info.go +++ b/internal/resourcestore/ec2Info.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" - configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -26,7 +25,8 @@ type ec2Info struct { Region string metadataProvider ec2metadataprovider.MetadataProvider - credentialCfg *configaws.CredentialConfig + ec2API ec2iface.EC2API + ec2Provider ec2ProviderType shutdownC chan bool } @@ -36,9 +36,8 @@ func (ei *ec2Info) initEc2Info() { if err := ei.setInstanceIdAndRegion(); err != nil { return } - ec2CredentialConfig := ei.credentialCfg - ec2CredentialConfig.Region = ei.Region - if err := ei.setAutoScalingGroup(ec2Provider(ec2CredentialConfig)); err != nil { + ei.ec2API = ei.ec2Provider(ei.Region) + if err := ei.setAutoScalingGroup(); err != nil { return } log.Printf("I! ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) @@ -66,7 +65,7 @@ func (ei *ec2Info) setInstanceIdAndRegion() error { } } -func (ei *ec2Info) setAutoScalingGroup(ec2API ec2iface.EC2API) error { +func (ei *ec2Info) setAutoScalingGroup() error { retry := 0 for { var waitDuration time.Duration @@ -88,7 +87,7 @@ func (ei *ec2Info) setAutoScalingGroup(ec2API ec2iface.EC2API) error { log.Printf("D! ec2Info: initial retrieval of tags and volumes with retry: %d", retry) } - if err := ei.retrieveAsgName(ec2API); err != nil { + if err := ei.retrieveAsgName(ei.ec2API); err != nil { log.Printf("E! ec2Info: Unable to describe ec2 tags for retry %d with error %v", retry, err) } else { log.Println("I! 
ec2Info: Retrieval of tags succeeded") @@ -142,3 +141,10 @@ func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { func (ei *ec2Info) Shutdown() { close(ei.shutdownC) } + +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType) *ec2Info { + return &ec2Info{ + metadataProvider: metadataProvider, + ec2Provider: providerType, + } +} diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 761e806993..1b610f8baa 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -4,6 +4,7 @@ package resourcestore import ( + "context" "log" "sync" @@ -24,10 +25,13 @@ var ( once sync.Once ) +type ec2ProviderType func(string) ec2iface.EC2API + type ServiceNameProvider interface { - startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) ServiceName() + startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) + getEC2Tags(ec2API ec2iface.EC2API) } type eksInfo struct { @@ -65,7 +69,9 @@ func GetResourceStore() *ResourceStore { } func initResourceStore() *ResourceStore { - // Add logic to store attributes such as instance ID, cluster name, etc here + // Get IMDS client and EC2 API client which requires region for authentication + // These will be passed down to any object that requires access to IMDS or EC2 + // API client so we have single source of truth for credential rs := &ResourceStore{} metadataProvider := getMetaDataProvider() if translatorCtx.CurrentContext().Mode() != "" { @@ -74,19 +80,11 @@ func initResourceStore() *ResourceStore { } switch rs.mode { case config.ModeEC2: - rs.ec2Info = ec2Info{ - metadataProvider: metadataProvider, - credentialCfg: &configaws.CredentialConfig{}, - } + rs.ec2Info = *newEC2Info(metadataProvider, getEC2Provider) go rs.ec2Info.initEc2Info() } - serviceInfo := newServiceProvider() - go func() { - 
err := serviceInfo.startServiceProvider(metadataProvider) - if err != nil { - log.Printf("E! resourcestore: Failed to start service provider: %v", err) - } - }() + serviceInfo := newServiceProvider(metadataProvider, getEC2Provider) + go serviceInfo.startServiceProvider() rs.serviceprovider = *serviceInfo return rs } @@ -128,7 +126,9 @@ func getMetaDataProvider() ec2metadataprovider.MetadataProvider { return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) } -func ec2Provider(ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { +func getEC2Provider(region string) ec2iface.EC2API { + ec2CredentialConfig := &configaws.CredentialConfig{} + ec2CredentialConfig.Region = region return ec2.New( ec2CredentialConfig.Credentials(), &aws.Config{ @@ -136,3 +136,11 @@ func ec2Provider(ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2AP Logger: configaws.SDKLogger{}, }) } + +func getRegion(metadataProvider ec2metadataprovider.MetadataProvider) (string, error) { + instanceDocument, err := metadataProvider.Get(context.Background()) + if err != nil { + return "", err + } + return instanceDocument.Region, nil +} diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index 488623f782..a34f6ee0ce 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -10,6 +10,9 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) type mockMetadataProvider struct { @@ -143,3 +146,27 @@ func TestResourceStore_Mode(t *testing.T) { }) } } + +func Test_getRegion(t *testing.T) { + tests := []struct { + name string + metadataProvider ec2metadataprovider.MetadataProvider + want string + }{ + { + name: "HappyPath", + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: 
&ec2metadata.EC2InstanceIdentityDocument{ + Region: "us-west-2"}, + }, + want: "us-west-2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getRegion(tt.metadataProvider) + assert.NoError(t, err) + assert.Equalf(t, tt.want, got, "getRegion(%v)", tt.metadataProvider) + }) + } +} diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go index a7d4fefaa6..4149c96d7e 100644 --- a/internal/resourcestore/serviceprovider.go +++ b/internal/resourcestore/serviceprovider.go @@ -4,29 +4,60 @@ package resourcestore import ( + "context" + "errors" "log" "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) const ( INSTANCE_PROFILE = "instance-profile/" + SERVICE = "service" + APPLICATION = "application" + APP = "app" +) + +var ( + priorityMap = map[string]int{ + SERVICE: 2, + APPLICATION: 1, + APP: 0, + } ) type serviceprovider struct { - iamRole string + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + ec2Provider ec2ProviderType + iamRole string + ec2TagServiceName string } -func (s *serviceprovider) startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) error { - err := s.getIAMRole(metadataProvider) +func (s *serviceprovider) startServiceProvider() { + go func() { + err := s.getIAMRole() + if err != nil { + log.Println("D! serviceprovider failed to get service name through IAM role in service provider: ", err) + } + }() + region, err := getRegion(s.metadataProvider) if err != nil { - log.Println("D! Failed to get IAM role through service provider") - return err + log.Println("D! 
serviceprovider failed to get region: ", err) } - return nil + go func() { + s.ec2API = s.ec2Provider(region) + err := s.getEC2TagServiceName() + if err != nil { + log.Println("D! serviceprovider failed to get service name through EC2 tags in service provider: ", err) + } + }() } // ServiceName function gets the relevant service name based @@ -34,14 +65,17 @@ func (s *serviceprovider) startServiceProvider(metadataProvider ec2metadataprovi // 1. Incoming telemetry attributes // 2. CWA config // 3. Process correlation -// 4. instance tags +// 4. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app // 5. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) func (s *serviceprovider) ServiceName() string { + if s.ec2TagServiceName != "" { + return s.ec2TagServiceName + } return s.iamRole } -func (s *serviceprovider) getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) error { - iamRole, err := metadataProvider.InstanceProfileIAMRole() +func (s *serviceprovider) getIAMRole() error { + iamRole, err := s.metadataProvider.InstanceProfileIAMRole() if err != nil { log.Println("D! resourceMap: Unable to retrieve EC2 Metadata. 
This feature must only be used on an EC2 instance.") return err @@ -60,6 +94,64 @@ func (s *serviceprovider) getIAMRole(metadataProvider ec2metadataprovider.Metada return nil } -func newServiceProvider() *serviceprovider { - return &serviceprovider{} +func (s *serviceprovider) getEC2TagServiceName() error { + serviceTagFilters, err := s.getEC2TagFilters() + if err != nil { + return err + } + currentTagPriority := -1 + for { + input := &ec2.DescribeTagsInput{ + Filters: serviceTagFilters, + } + result, err := s.ec2API.DescribeTags(input) + if err != nil { + continue + } + for _, tag := range result.Tags { + key := *tag.Key + value := *tag.Value + if priority, found := priorityMap[key]; found { + if priority > currentTagPriority { + s.ec2TagServiceName = value + currentTagPriority = priority + } + } + } + if result.NextToken == nil { + break + } + input.SetNextToken(*result.NextToken) + } + return nil +} + +func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { + instanceDocument, err := s.metadataProvider.Get(context.Background()) + if err != nil { + return nil, errors.New("failed to get instance document") + } + instanceID := instanceDocument.InstanceID + tagFilters := []*ec2.Filter{ + { + Name: aws.String("resource-type"), + Values: aws.StringSlice([]string{"instance"}), + }, + { + Name: aws.String("resource-id"), + Values: aws.StringSlice([]string{instanceID}), + }, + { + Name: aws.String("key"), + Values: aws.StringSlice([]string{SERVICE, APPLICATION, APP}), + }, + } + return tagFilters, nil +} + +func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType) *serviceprovider { + return &serviceprovider{ + metadataProvider: metadataProvider, + ec2Provider: providerType, + } } diff --git a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go index 894a6dacae..9c380c37bd 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ 
b/internal/resourcestore/serviceprovider_test.go @@ -5,34 +5,72 @@ package resourcestore import ( "testing" + "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) -func Test_serviceprovider_initServiceProvider(t *testing.T) { +type mockServiceNameEC2Client struct { + ec2iface.EC2API +} + +// construct the return results for the mocked DescribeTags api +var ( + tagKeyService = "service" + tagValService = "test-service" + tagDesService = ec2.TagDescription{Key: &tagKeyService, Value: &tagValService} +) + +func (m *mockServiceNameEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { + testTags := ec2.DescribeTagsOutput{ + NextToken: nil, + Tags: []*ec2.TagDescription{&tagDesService}, + } + return &testTags, nil +} + +func Test_serviceprovider_startServiceProvider(t *testing.T) { type args struct { metadataProvider ec2metadataprovider.MetadataProvider + ec2Client ec2iface.EC2API } tests := []struct { name string args args wantIAM string + wantTag string }{ { - name: "HappyPath_IAMRole", + name: "HappyPath_AllServiceNames", args: args{ - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2Client: &mockServiceNameEC2Client{}, }, wantIAM: "TestRole", + wantTag: "test-service", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := &serviceprovider{} - s.startServiceProvider(tt.args.metadataProvider) + s := serviceprovider{ + metadataProvider: tt.args.metadataProvider, + ec2Provider: func(s string) ec2iface.EC2API { + return tt.args.ec2Client + }, + } + s.startServiceProvider() + time.Sleep(1 * time.Second) assert.Equal(t, 
tt.wantIAM, s.iamRole) + assert.Equal(t, tt.wantTag, s.ec2TagServiceName) }) } } @@ -65,22 +103,114 @@ func Test_serviceprovider_ServiceName(t *testing.T) { } func Test_serviceprovider_getIAMRole(t *testing.T) { - tests := []struct { - name string + type fields struct { metadataProvider ec2metadataprovider.MetadataProvider - want string + ec2API ec2iface.EC2API + } + tests := []struct { + name string + fields fields + want string }{ { - name: "Happypath_MockMetadata", - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, - want: "TestRole", + name: "Happypath_MockMetadata", + fields: fields{ + metadataProvider: &mockMetadataProvider{}, + ec2API: &mockServiceNameEC2Client{}, + }, + want: "TestRole", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := &serviceprovider{} - s.getIAMRole(tt.metadataProvider) + s := serviceprovider{ + metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + } + s.getIAMRole() assert.Equal(t, tt.want, s.iamRole) }) } } + +func Test_serviceprovider_getEC2TagFilters(t *testing.T) { + type fields struct { + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + } + tests := []struct { + name string + fields fields + want []*ec2.Filter + wantErr assert.ErrorAssertionFunc + }{ + { + name: "HappyPath_MatchTags", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: &mockServiceNameEC2Client{}, + }, + want: []*ec2.Filter{ + { + Name: aws.String("resource-type"), + Values: aws.StringSlice([]string{"instance"}), + }, { + Name: aws.String("resource-id"), + Values: aws.StringSlice([]string{"i-123456789"}), + }, { + Name: aws.String("key"), + Values: aws.StringSlice([]string{"service", "application", "app"}), + }, + }, + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceprovider{ + 
metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + } + got, err := s.getEC2TagFilters() + assert.NoError(t, err) + assert.Equalf(t, tt.want, got, "getEC2TagFilters()") + }) + } +} + +func Test_serviceprovider_getEC2TagServiceName(t *testing.T) { + type fields struct { + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + } + tests := []struct { + name string + fields fields + wantTagServiceName string + }{ + { + name: "HappyPath_ServiceExists", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: &mockServiceNameEC2Client{}, + }, + wantTagServiceName: "test-service", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceprovider{ + metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + } + s.getEC2TagServiceName() + assert.Equal(t, tt.wantTagServiceName, s.ec2TagServiceName) + }) + } +} From fcfe6585b1c105aa52c925837aaa8ba1da5437ad Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Fri, 14 Jun 2024 12:09:41 -0400 Subject: [PATCH 24/55] ResourceStore Implementation change to fetch tags from IMDS before Describe tags (#721) --- .../ec2metadataprovider.go | 15 ++++ internal/resourcestore/ec2Info.go | 26 +++++- internal/resourcestore/ec2Info_test.go | 85 +++++++++++++++++-- internal/resourcestore/resourcestore_test.go | 10 +++ .../processors/ec2tagger/ec2tagger_test.go | 8 ++ 5 files changed, 137 insertions(+), 7 deletions(-) diff --git a/internal/ec2metadataprovider/ec2metadataprovider.go b/internal/ec2metadataprovider/ec2metadataprovider.go index 1546c505c6..48203aef1f 100644 --- a/internal/ec2metadataprovider/ec2metadataprovider.go +++ b/internal/ec2metadataprovider/ec2metadataprovider.go @@ -21,6 +21,8 @@ type MetadataProvider interface { Hostname(ctx context.Context) (string, error) InstanceID(ctx context.Context) (string, 
error) InstanceProfileIAMRole() (string, error) + InstanceTags(ctx context.Context) (string, error) + InstanceTagValue(ctx context.Context, tagKey string) (string, error) } type metadataClient struct { @@ -69,6 +71,19 @@ func (c *metadataClient) InstanceProfileIAMRole() (string, error) { }) } +func (c *metadataClient) InstanceTags(ctx context.Context) (string, error) { + return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (string, error) { + return metadataClient.GetMetadataWithContext(ctx, "tags/instance") + }) +} + +func (c *metadataClient) InstanceTagValue(ctx context.Context, tagKey string) (string, error) { + path := "tags/instance/" + tagKey + return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (string, error) { + return metadataClient.GetMetadataWithContext(ctx, path) + }) +} + func (c *metadataClient) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { return withMetadataFallbackRetry(ctx, c, func(metadataClient *ec2metadata.EC2Metadata) (ec2metadata.EC2InstanceIdentityDocument, error) { return metadataClient.GetInstanceIdentityDocumentWithContext(ctx) diff --git a/internal/resourcestore/ec2Info.go b/internal/resourcestore/ec2Info.go index 38043c3fba..131f859ad0 100644 --- a/internal/resourcestore/ec2Info.go +++ b/internal/resourcestore/ec2Info.go @@ -7,6 +7,7 @@ import ( "context" "errors" "log" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -40,7 +41,7 @@ func (ei *ec2Info) initEc2Info() { if err := ei.setAutoScalingGroup(); err != nil { return } - log.Printf("I! ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) + log.Printf("D! 
ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) ei.Shutdown() } @@ -59,7 +60,7 @@ func (ei *ec2Info) setInstanceIdAndRegion() error { } else { ei.InstanceID = metadataDoc.InstanceID ei.Region = metadataDoc.Region - log.Printf("I! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) + log.Printf("D! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) return nil } } @@ -99,7 +100,28 @@ func (ei *ec2Info) setAutoScalingGroup() error { } +/* +This can also be implemented by just calling the InstanceTagValue and then DescribeTags on failure. But preferred the current implementation +as we need to distinguish the tags not being fetchable at all, from the ASG tag in particular not existing. +*/ func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { + tags, err := ei.metadataProvider.InstanceTags(context.Background()) + if err != nil { + log.Printf("E! ec2Info: Failed to get tags through metadata provider: %v", err.Error()) + return ei.retrieveAsgNameWithDescribeTags(ec2API) + } else if strings.Contains(tags, ec2tagger.Ec2InstanceTagKeyASG) { + asg, err := ei.metadataProvider.InstanceTagValue(context.Background(), ec2tagger.Ec2InstanceTagKeyASG) + if err != nil { + log.Printf("E! ec2Info: Failed to get AutoScalingGroup through metadata provider: %v", err.Error()) + } else { + log.Printf("D! 
ec2Info: AutoScalingGroup retrieved through IMDS: %s", asg) + ei.AutoScalingGroup = asg + } + } + return nil +} + +func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error { tagFilters := []*ec2.Filter{ { Name: aws.String("resource-type"), diff --git a/internal/resourcestore/ec2Info_test.go b/internal/resourcestore/ec2Info_test.go index 898b27a1da..d58a1d98e0 100644 --- a/internal/resourcestore/ec2Info_test.go +++ b/internal/resourcestore/ec2Info_test.go @@ -23,6 +23,7 @@ var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ type mockEC2Client struct { ec2iface.EC2API + withASG bool } // construct the return results for the mocked DescribeTags api @@ -46,9 +47,17 @@ var ( func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { //all tags are returned when the ec2 metadata service knows about all tags - allTags := ec2.DescribeTagsOutput{ - NextToken: nil, - Tags: []*ec2.TagDescription{&tagDes1, &tagDes2, &tagDes3}, + var allTags ec2.DescribeTagsOutput + if m.withASG { + allTags = ec2.DescribeTagsOutput{ + NextToken: nil, + Tags: []*ec2.TagDescription{&tagDes1, &tagDes2, &tagDes3}, + } + } else { + allTags = ec2.DescribeTagsOutput{ + NextToken: nil, + Tags: []*ec2.TagDescription{&tagDes1, &tagDes2}, + } } return &allTags, nil @@ -91,6 +100,62 @@ func TestSetInstanceIdAndRegion(t *testing.T) { } func TestRetrieveASGName(t *testing.T) { + type args struct { + ec2Client ec2iface.EC2API + metadataProvider ec2metadataprovider.MetadataProvider + } + tests := []struct { + name string + args args + wantErr bool + want ec2Info + }{ + { + name: "happy path", + args: args{ + ec2Client: &mockEC2Client{}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, + }, + wantErr: false, + want: ec2Info{ + AutoScalingGroup: tagVal3, + }, + }, + { + name: "happy path with multiple tags", + args: args{ + 
ec2Client: &mockEC2Client{}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName\nenv\nname", TagValue: tagVal3}, + }, + wantErr: false, + want: ec2Info{ + AutoScalingGroup: tagVal3, + }, + }, + { + name: "Success IMDS tags call but no ASG", + args: args{ + ec2Client: &mockEC2Client{}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "name", TagValue: tagVal3}, + }, + wantErr: false, + want: ec2Info{ + AutoScalingGroup: "", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ei := &ec2Info{metadataProvider: tt.args.metadataProvider} + if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { + t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) + }) + } +} + +func TestRetrieveASGNameWithDescribeTags(t *testing.T) { type args struct { ec2Client ec2iface.EC2API } @@ -103,18 +168,28 @@ func TestRetrieveASGName(t *testing.T) { { name: "happy path", args: args{ - ec2Client: &mockEC2Client{}, + ec2Client: &mockEC2Client{withASG: true}, }, wantErr: false, want: ec2Info{ AutoScalingGroup: tagVal3, }, }, + { + name: "Success Describe tags call but no ASG", + args: args{ + ec2Client: &mockEC2Client{withASG: false}, + }, + wantErr: false, + want: ec2Info{ + AutoScalingGroup: "", + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ei := &ec2Info{} - if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { + if err := ei.retrieveAsgNameWithDescribeTags(tt.args.ec2Client); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index a34f6ee0ce..8257e82cf3 100644 
--- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -17,6 +17,8 @@ import ( type mockMetadataProvider struct { InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument + Tags string + TagValue string } func (m *mockMetadataProvider) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { @@ -38,6 +40,14 @@ func (m *mockMetadataProvider) InstanceProfileIAMRole() (string, error) { return "arn:aws:iam::123456789:instance-profile/TestRole", nil } +func (m *mockMetadataProvider) InstanceTags(ctx context.Context) (string, error) { + return m.Tags, nil +} + +func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey string) (string, error) { + return m.TagValue, nil +} + func TestInitResourceStore(t *testing.T) { tests := []struct { name string diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index f7baba1dd5..25e419b20c 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -144,10 +144,18 @@ func (m *mockMetadataProvider) InstanceID(ctx context.Context) (string, error) { return "MockInstanceID", nil } +func (m *mockMetadataProvider) InstanceTags(ctx context.Context) (string, error) { + return "MockInstanceTag", nil +} + func (m *mockMetadataProvider) InstanceProfileIAMRole() (string, error) { return "MockIAM", nil } +func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey string) (string, error) { + return "MockInstanceValue", nil +} + var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ InstanceID: "i-01d2417c27a396e44", Region: "us-east-1", From b4869590e9b170fea1af590e01d168115d0c84e8 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 14 Jun 2024 13:19:19 -0400 Subject: [PATCH 25/55] Add ServiceNameSource and dynamically modify RID (#720) --- 
internal/resourcestore/resourcestore.go | 18 +++++----- internal/resourcestore/serviceprovider.go | 26 ++++++++++++--- .../resourcestore/serviceprovider_test.go | 33 ++++++++++++++----- 3 files changed, 55 insertions(+), 22 deletions(-) diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 1b610f8baa..2692a1ebbc 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -83,9 +83,8 @@ func initResourceStore() *ResourceStore { rs.ec2Info = *newEC2Info(metadataProvider, getEC2Provider) go rs.ec2Info.initEc2Info() } - serviceInfo := newServiceProvider(metadataProvider, getEC2Provider) - go serviceInfo.startServiceProvider() - rs.serviceprovider = *serviceInfo + rs.serviceprovider = *newServiceProvider(metadataProvider, getEC2Provider) + go rs.serviceprovider.startServiceProvider() return rs } @@ -106,17 +105,20 @@ func (r *ResourceStore) LogFiles() map[string]string { } func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { + serviceAttr := r.serviceprovider.ServiceAttribute() return &cloudwatchlogs.Resource{ AttributeMaps: []map[string]*string{ { - "PlatformType": aws.String("AWS::EC2"), - "EC2.InstanceId": aws.String("i-123456789"), - "EC2.AutoScalingGroup": aws.String("test-group"), + "PlatformType": aws.String("AWS::EC2"), + "EC2.InstanceId": aws.String(r.ec2Info.InstanceID), + "EC2.AutoScalingGroup": aws.String(r.ec2Info.AutoScalingGroup), + "AWS.Internal.ServiceNameSource": aws.String(serviceAttr.serviceNameSource), }, }, KeyAttributes: &cloudwatchlogs.KeyAttributes{ - Name: aws.String("myService"), - Environment: aws.String("myEnvironment"), + Type: aws.String("Service"), + Name: aws.String(serviceAttr.serviceName), + Environment: aws.String(serviceAttr.environment), }, } } diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go index 4149c96d7e..9dc21622f8 100644 --- 
a/internal/resourcestore/serviceprovider.go +++ b/internal/resourcestore/serviceprovider.go @@ -22,6 +22,8 @@ const ( SERVICE = "service" APPLICATION = "application" APP = "app" + ClientIamRole = "ClientIamRole" + ResourceTags = "ResourceTags" ) var ( @@ -32,6 +34,12 @@ var ( } ) +type ServiceAttribute struct { + serviceName string + serviceNameSource string + environment string +} + type serviceprovider struct { metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API @@ -60,18 +68,26 @@ func (s *serviceprovider) startServiceProvider() { }() } -// ServiceName function gets the relevant service name based -// on the following priority chain +// ServiceAttribute function gets the relevant service attributes +// service name is retrieved based on the following priority chain // 1. Incoming telemetry attributes // 2. CWA config // 3. Process correlation // 4. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app // 5. 
IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) -func (s *serviceprovider) ServiceName() string { +func (s *serviceprovider) ServiceAttribute() ServiceAttribute { + serviceAttr := ServiceAttribute{} if s.ec2TagServiceName != "" { - return s.ec2TagServiceName + serviceAttr.serviceName = s.ec2TagServiceName + serviceAttr.serviceNameSource = ResourceTags + return serviceAttr + } + if s.iamRole != "" { + serviceAttr.serviceName = s.iamRole + serviceAttr.serviceNameSource = ClientIamRole + return serviceAttr } - return s.iamRole + return serviceAttr } func (s *serviceprovider) getIAMRole() error { diff --git a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go index 9c380c37bd..b89763d5d6 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ b/internal/resourcestore/serviceprovider_test.go @@ -68,36 +68,51 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { }, } s.startServiceProvider() - time.Sleep(1 * time.Second) + time.Sleep(time.Second) assert.Equal(t, tt.wantIAM, s.iamRole) assert.Equal(t, tt.wantTag, s.ec2TagServiceName) }) } } -func Test_serviceprovider_ServiceName(t *testing.T) { +func Test_serviceprovider_ServiceAttribute(t *testing.T) { type fields struct { - iamRole string + iamRole string + ec2TagServiceName string } tests := []struct { name string fields fields - want string + want ServiceAttribute }{ { - name: "HappyPath_IAMServiceName", + name: "HappyPath_IAMRole", fields: fields{ - iamRole: "MockIAM", + iamRole: "TestRole", + }, + want: ServiceAttribute{ + serviceName: "TestRole", + serviceNameSource: ClientIamRole, + }, + }, + { + name: "HappyPath_EC2TagServiceName", + fields: fields{ + ec2TagServiceName: "tag-service", + }, + want: ServiceAttribute{ + serviceName: "tag-service", + serviceNameSource: ResourceTags, }, - want: "MockIAM", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &serviceprovider{ - iamRole: tt.fields.iamRole, 
+ iamRole: tt.fields.iamRole, + ec2TagServiceName: tt.fields.ec2TagServiceName, } - assert.Equal(t, tt.want, s.ServiceName()) + assert.Equalf(t, tt.want, s.ServiceAttribute(), "ServiceAttribute()") }) } } From bcb4ff7f78c9f31301f89d84f84dd1a5311956e8 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 17 Jun 2024 09:49:46 -0400 Subject: [PATCH 26/55] Add RID attributes creation helper functions (#722) --- internal/resourcestore/resourcestore.go | 51 +++++++--- internal/resourcestore/resourcestore_test.go | 102 +++++++++++++++++++ 2 files changed, 141 insertions(+), 12 deletions(-) diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 2692a1ebbc..2fc87ae91f 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -20,6 +20,13 @@ import ( translatorCtx "github.com/aws/amazon-cloudwatch-agent/translator/context" ) +const ( + Service = "Service" + InstanceIDKey = "EC2.InstanceId" + ASGKey = "EC2.AutoScalingGroup" + ServieNameSourceKey = "AWS.Internal.ServiceNameSource" +) + var ( resourceStore *ResourceStore once sync.Once @@ -105,24 +112,38 @@ func (r *ResourceStore) LogFiles() map[string]string { } func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { - serviceAttr := r.serviceprovider.ServiceAttribute() return &cloudwatchlogs.Resource{ AttributeMaps: []map[string]*string{ - { - "PlatformType": aws.String("AWS::EC2"), - "EC2.InstanceId": aws.String(r.ec2Info.InstanceID), - "EC2.AutoScalingGroup": aws.String(r.ec2Info.AutoScalingGroup), - "AWS.Internal.ServiceNameSource": aws.String(serviceAttr.serviceNameSource), - }, - }, - KeyAttributes: &cloudwatchlogs.KeyAttributes{ - Type: aws.String("Service"), - Name: aws.String(serviceAttr.serviceName), - Environment: aws.String(serviceAttr.environment), + r.createAttributeMaps(), }, + KeyAttributes: r.createServiceKeyAttributes(), } } +func 
(r *ResourceStore) createAttributeMaps() map[string]*string { + serviceAttr := r.serviceprovider.ServiceAttribute() + attributeMap := make(map[string]*string) + + addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) + addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) + addNonEmptyToMap(attributeMap, ServieNameSourceKey, serviceAttr.serviceNameSource) + return attributeMap +} + +func (r *ResourceStore) createServiceKeyAttributes() *cloudwatchlogs.KeyAttributes { + serviceAttr := r.serviceprovider.ServiceAttribute() + serviceKeyAttr := &cloudwatchlogs.KeyAttributes{ + Type: aws.String(Service), + } + if serviceAttr.serviceName != "" { + serviceKeyAttr.SetName(serviceAttr.serviceName) + } + if serviceAttr.environment != "" { + serviceKeyAttr.SetEnvironment(serviceAttr.environment) + } + return serviceKeyAttr +} + func getMetaDataProvider() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) @@ -146,3 +167,9 @@ func getRegion(metadataProvider ec2metadataprovider.MetadataProvider) (string, e } return instanceDocument.Region, nil } + +func addNonEmptyToMap(m map[string]*string, key, value string) { + if value != "" { + m[key] = aws.String(value) + } +} diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index 8257e82cf3..ca19fe4bdb 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -9,7 +9,9 @@ import ( "reflect" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -180,3 +182,103 @@ func Test_getRegion(t *testing.T) { }) } } + +func TestResourceStore_createAttributeMaps(t 
*testing.T) { + type fields struct { + ec2Info ec2Info + serviceprovider serviceprovider + } + tests := []struct { + name string + fields fields + want map[string]*string + }{ + { + name: "HappyPath_IAMRole", + fields: fields{ + ec2Info: ec2Info{ + InstanceID: "i-123456789", + AutoScalingGroup: "test-asg", + }, + serviceprovider: serviceprovider{ + iamRole: "test-role", + }, + }, + want: map[string]*string{ + ServieNameSourceKey: aws.String(ClientIamRole), + ASGKey: aws.String("test-asg"), + InstanceIDKey: aws.String("i-123456789"), + }, + }, + { + name: "HappyPath_TagServiceName", + fields: fields{ + ec2Info: ec2Info{ + InstanceID: "i-123456789", + AutoScalingGroup: "test-asg", + }, + serviceprovider: serviceprovider{ + ec2TagServiceName: "test-tag-service", + }, + }, + want: map[string]*string{ + ServieNameSourceKey: aws.String(ResourceTags), + ASGKey: aws.String("test-asg"), + InstanceIDKey: aws.String("i-123456789"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + ec2Info: tt.fields.ec2Info, + serviceprovider: tt.fields.serviceprovider, + } + assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createAttributeMaps()), "createAttributeMaps()") + }) + } +} + +func TestResourceStore_createServiceKeyAttributes(t *testing.T) { + type fields struct { + serviceprovider serviceprovider + } + tests := []struct { + name string + fields fields + want *cloudwatchlogs.KeyAttributes + }{ + { + name: "HappyPath_", + fields: fields{ + serviceprovider: serviceprovider{ + iamRole: "test-role", + }, + }, + want: &cloudwatchlogs.KeyAttributes{ + Name: aws.String("test-role"), + Type: aws.String(Service), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + serviceprovider: tt.fields.serviceprovider, + } + assert.Equalf(t, tt.want, r.createServiceKeyAttributes(), "createServiceKeyAttributes()") + }) + } +} + +func dereferenceMap(input map[string]*string) 
map[string]string { + result := make(map[string]string) + for k, v := range input { + if v != nil { + result[k] = *v + } else { + result[k] = "" + } + } + return result +} From 285ca8bce7e6eca660ba693cf32591aa12e1456b Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Thu, 20 Jun 2024 14:00:37 -0400 Subject: [PATCH 27/55] [Compass] Add customer specified service.name and deployment.environment fields to logs config (#726) --- internal/resourcestore/resourcestore.go | 31 +- internal/resourcestore/resourcestore_test.go | 29 +- internal/resourcestore/serviceprovider.go | 21 +- .../resourcestore/serviceprovider_test.go | 8 +- plugins/inputs/logfile/fileconfig.go | 5 + plugins/inputs/logfile/logfile.go | 9 + plugins/inputs/logfile/logfile_test.go | 8 +- plugins/outputs/cloudwatchlogs/pusher_test.go | 2 +- translator/config/schema.json | 30 + .../sampleConfig/compass_linux_config.conf | 57 ++ .../sampleConfig/compass_linux_config.json | 48 ++ translator/tocwconfig/tocwconfig_test.go | 13 + .../tomlConfigTemplate/tomlConfig.go | 8 +- translator/translate/agent/agent.go | 26 +- translator/translate/agent/agent_test.go | 18 + .../agent/ruleDeploymentEnvironment.go | 19 + translator/translate/agent/ruleServiceName.go | 19 + translator/translate/logs/logs.go | 16 +- .../files/collect_list/collect_list_test.go | 616 +++++++++++------- .../collect_list/ruleDeploymentEnvironment.go | 29 + .../files/collect_list/ruleServiceName.go | 29 + translator/translate/logs/logs_test.go | 39 ++ .../logs/ruleDeploymentEnvironment.go | 25 + translator/translate/logs/ruleServiceName.go | 25 + 24 files changed, 848 insertions(+), 282 deletions(-) create mode 100644 translator/tocwconfig/sampleConfig/compass_linux_config.conf create mode 100755 translator/tocwconfig/sampleConfig/compass_linux_config.json create mode 100644 translator/translate/agent/ruleDeploymentEnvironment.go create mode 100644 translator/translate/agent/ruleServiceName.go create mode 100644 
translator/translate/logs/logs_collected/files/collect_list/ruleDeploymentEnvironment.go create mode 100644 translator/translate/logs/logs_collected/files/collect_list/ruleServiceName.go create mode 100644 translator/translate/logs/ruleDeploymentEnvironment.go create mode 100644 translator/translate/logs/ruleServiceName.go diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 2fc87ae91f..9a6d48dbcd 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -59,13 +59,6 @@ type ResourceStore struct { // serviceprovider stores information about possible service names // that we can attach to the resource ID serviceprovider serviceprovider - - // logFiles is a variable reserved for communication between OTEL components and LogAgent - // in order to achieve process correlations where the key is the log file path and the value - // is the service name - // Example: - // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent" - logFiles map[string]string } func GetResourceStore() *ResourceStore { @@ -91,6 +84,7 @@ func initResourceStore() *ResourceStore { go rs.ec2Info.initEc2Info() } rs.serviceprovider = *newServiceProvider(metadataProvider, getEC2Provider) + rs.serviceprovider.logFiles = map[string]ServiceAttribute{} go rs.serviceprovider.startServiceProvider() return rs } @@ -107,10 +101,6 @@ func (r *ResourceStore) EKSInfo() eksInfo { return r.eksInfo } -func (r *ResourceStore) LogFiles() map[string]string { - return r.logFiles -} - func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { return &cloudwatchlogs.Resource{ AttributeMaps: []map[string]*string{ @@ -120,13 +110,22 @@ func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) * } } +// AddServiceAttrEntryToResourceStore adds an entry to the resource store for the provided file -> serviceName, environmentName key-value 
pair +func (r *ResourceStore) AddServiceAttrEntryToResourceStore(key string, serviceName string, environmentName string) { + r.serviceprovider.logFiles[key] = ServiceAttribute{ServiceName: serviceName, Environment: environmentName} +} + +func (r *ResourceStore) LogFiles() map[string]ServiceAttribute { + return r.serviceprovider.logFiles +} + func (r *ResourceStore) createAttributeMaps() map[string]*string { serviceAttr := r.serviceprovider.ServiceAttribute() attributeMap := make(map[string]*string) addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) - addNonEmptyToMap(attributeMap, ServieNameSourceKey, serviceAttr.serviceNameSource) + addNonEmptyToMap(attributeMap, ServieNameSourceKey, serviceAttr.ServiceNameSource) return attributeMap } @@ -135,11 +134,11 @@ func (r *ResourceStore) createServiceKeyAttributes() *cloudwatchlogs.KeyAttribut serviceKeyAttr := &cloudwatchlogs.KeyAttributes{ Type: aws.String(Service), } - if serviceAttr.serviceName != "" { - serviceKeyAttr.SetName(serviceAttr.serviceName) + if serviceAttr.ServiceName != "" { + serviceKeyAttr.SetName(serviceAttr.ServiceName) } - if serviceAttr.environment != "" { - serviceKeyAttr.SetEnvironment(serviceAttr.environment) + if serviceAttr.Environment != "" { + serviceKeyAttr.SetEnvironment(serviceAttr.Environment) } return serviceKeyAttr } diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index ca19fe4bdb..d8f916ec9f 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -118,21 +118,23 @@ func TestResourceStore_EKSInfo(t *testing.T) { func TestResourceStore_LogFiles(t *testing.T) { tests := []struct { name string - logFileInput map[string]string - want map[string]string + logFileInput map[string]ServiceAttribute + want map[string]ServiceAttribute }{ { name: "happypath", - logFileInput: 
map[string]string{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent"}, - want: map[string]string{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent"}, + logFileInput: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {"cloudwatch-agent", "", "ec2:test"}}, + want: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {"cloudwatch-agent", "", "ec2:test"}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &ResourceStore{ - logFiles: tt.logFileInput, + serviceprovider: serviceprovider{ + logFiles: tt.logFileInput, + }, } - if got := r.LogFiles(); !reflect.DeepEqual(got, tt.want) { + if got := r.serviceprovider.logFiles; !reflect.DeepEqual(got, tt.want) { t.Errorf("LogFiles() = %v, want %v", got, tt.want) } }) @@ -282,3 +284,18 @@ func dereferenceMap(input map[string]*string) map[string]string { } return result } + +func TestAddServiceKeyAttributeToLogFilesMap(t *testing.T) { + rs := initResourceStore() + key := "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + rs.AddServiceAttrEntryToResourceStore(key, "test", "ec2:test") + + expected := &ResourceStore{ + serviceprovider: serviceprovider{ + iamRole: "test-role", + logFiles: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "test", Environment: "ec2:test"}}, + }, + } + + assert.Equal(t, true, reflect.DeepEqual(rs.LogFiles(), expected.serviceprovider.logFiles)) +} diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go index 9dc21622f8..c4c3fc6c07 100644 --- a/internal/resourcestore/serviceprovider.go +++ b/internal/resourcestore/serviceprovider.go @@ -35,9 +35,9 @@ var ( ) type ServiceAttribute struct { - serviceName string - serviceNameSource string - environment string + ServiceName string + ServiceNameSource 
string + Environment string } type serviceprovider struct { @@ -46,6 +46,13 @@ type serviceprovider struct { ec2Provider ec2ProviderType iamRole string ec2TagServiceName string + + // logFiles is a variable reserved for communication between OTEL components and LogAgent + // in order to achieve process correlations where the key is the log file path and the value + // is the service name + // Example: + // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent" + logFiles map[string]ServiceAttribute } func (s *serviceprovider) startServiceProvider() { @@ -78,13 +85,13 @@ func (s *serviceprovider) startServiceProvider() { func (s *serviceprovider) ServiceAttribute() ServiceAttribute { serviceAttr := ServiceAttribute{} if s.ec2TagServiceName != "" { - serviceAttr.serviceName = s.ec2TagServiceName - serviceAttr.serviceNameSource = ResourceTags + serviceAttr.ServiceName = s.ec2TagServiceName + serviceAttr.ServiceNameSource = ResourceTags return serviceAttr } if s.iamRole != "" { - serviceAttr.serviceName = s.iamRole - serviceAttr.serviceNameSource = ClientIamRole + serviceAttr.ServiceName = s.iamRole + serviceAttr.ServiceNameSource = ClientIamRole return serviceAttr } return serviceAttr diff --git a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go index b89763d5d6..dd8d485c6c 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ b/internal/resourcestore/serviceprovider_test.go @@ -91,8 +91,8 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { iamRole: "TestRole", }, want: ServiceAttribute{ - serviceName: "TestRole", - serviceNameSource: ClientIamRole, + ServiceName: "TestRole", + ServiceNameSource: ClientIamRole, }, }, { @@ -101,8 +101,8 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { ec2TagServiceName: "tag-service", }, want: ServiceAttribute{ - serviceName: "tag-service", - serviceNameSource: ResourceTags, + ServiceName: "tag-service", + ServiceNameSource: 
ResourceTags, }, }, } diff --git a/plugins/inputs/logfile/fileconfig.go b/plugins/inputs/logfile/fileconfig.go index 00aec4f7e6..1ef082b809 100644 --- a/plugins/inputs/logfile/fileconfig.go +++ b/plugins/inputs/logfile/fileconfig.go @@ -83,6 +83,11 @@ type FileConfig struct { Filters []*LogFilter `toml:"filters"` + //Customer specified service.name and deployment.environment + ServiceName string `toml:"service_name"` + //Customer specified deployment.environment + Environment string `toml:"deployment_environment"` + //Time *time.Location Go type timezone info. TimezoneLoc *time.Location //Regexp go type timestampFromLogLine regex diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index 7794b57a4f..a07030b809 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/aws/amazon-cloudwatch-agent/internal/logscommon" + "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/globpath" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" @@ -152,9 +153,17 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { t.cleanUpStoppedTailerSrc() + rs := resourcestore.GetResourceStore() + // Create a "tailer" for each file for i := range t.FileConfig { fileconfig := &t.FileConfig[i] + + //Add file -> {serviceName, deploymentEnvironment} mapping to resource store + rs.AddServiceAttrEntryToResourceStore(fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) + + t.Log.Debugf("Created entry for file=%s with serviceName=%s, environment=%s in resource store", fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) + targetFiles, err := t.getTargetFiles(fileconfig) if err != nil { t.Log.Errorf("Failed to find target files for file config %v, with error: %v", fileconfig.FilePath, err) diff 
--git a/plugins/inputs/logfile/logfile_test.go b/plugins/inputs/logfile/logfile_test.go index 2c1a2e98dd..fb680bfd71 100644 --- a/plugins/inputs/logfile/logfile_test.go +++ b/plugins/inputs/logfile/logfile_test.go @@ -18,6 +18,7 @@ import ( "golang.org/x/text/encoding/simplifiedchinese" "golang.org/x/text/transform" + "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" ) @@ -59,7 +60,8 @@ func TestLogs(t *testing.T) { tt := NewLogFile() tt.Log = TestLogger{t} - tt.FileConfig = []FileConfig{{FilePath: tmpfile.Name(), FromBeginning: true}} + filename := tmpfile.Name() + tt.FileConfig = []FileConfig{{FilePath: filename, FromBeginning: true, ServiceName: "test-service-name", Environment: "ec2:test-environment"}} tt.FileConfig[0].init() tt.started = true @@ -68,6 +70,10 @@ func TestLogs(t *testing.T) { t.Fatalf("%v log src was returned when only 1 should be available", len(lsrcs)) } + rs := resourcestore.GetResourceStore() + assert.Equal(t, rs.LogFiles()[filename].ServiceName, "test-service-name") + assert.Equal(t, rs.LogFiles()[filename].Environment, "ec2:test-environment") + done := make(chan struct{}) lsrc := lsrcs[0] lsrc.SetOutput(func(e logs.LogEvent) { diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index 482f5a6e7c..726a32ee79 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -738,7 +738,7 @@ func TestPutRetentionValidMaxInput(t *testing.T) { prpc++ return nil, nil } - stop, p := testPreparation(1000000000000000000, &s, 1*time.Hour, maxRetryTimeout) + stop, p := testPreparation(100000000, &s, 1*time.Hour, maxRetryTimeout) p.putRetentionPolicy() require.Equal(t, 2, prpc, fmt.Sprintf("Put Retention Policy api should have been called twice. 
Number of times called: %v", prpc)) diff --git a/translator/config/schema.json b/translator/config/schema.json index b442482cfb..4bdcf8e91f 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -52,6 +52,16 @@ "omit_hostname": { "description": "Hostname will be tagged by default unless you specifying append_dimensions, this flag allow you to omit hostname from tags without specifying append_dimensions", "type": "boolean" + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "additionalProperties": true @@ -775,6 +785,16 @@ "endpoint_override": { "description": "The override endpoint to use to access cloudwatch logs", "$ref": "#/definitions/endpointOverrideDefinition" + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "additionalProperties": false, @@ -855,6 +875,16 @@ "items": { "$ref": "#/definitions/logsDefinition/definitions/filterDefinition" } + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "required": [ diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/compass_linux_config.conf new file mode 100644 index 0000000000..ebf652fe84 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.conf @@ -0,0 +1,57 @@ +[agent] + collection_jitter = "0s" + debug = true + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "10s" + logfile = "/tmp/fake/log/hotdog.log" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = true + round_interval = false + +[inputs] + + 
[[inputs.logfile]] + destination = "cloudwatchlogs" + file_state_folder = "/opt/aws/amazon-cloudwatch-agent/logs/state" + + [[inputs.logfile.file_config]] + deployment_environment = "file-level-environment" + file_path = "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" + from_beginning = true + log_group_class = "" + log_group_name = "amazon-cloudwatch-agent.log" + log_stream_name = "amazon-cloudwatch-agent.log" + pipe = false + retention_in_days = 5 + service_name = "file-level-service" + timezone = "UTC" + + [[inputs.logfile.file_config]] + auto_removal = true + deployment_environment = "agent-level-environment" + file_path = "/opt/aws/amazon-cloudwatch-agent/logs/test.log" + from_beginning = true + log_group_class = "" + log_group_name = "test.log" + log_stream_name = "test.log" + pipe = false + retention_in_days = -1 + service_name = "log-level-service" + timezone = "UTC" + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://logs-fips.us-west-2.amazonaws.com" + force_flush_interval = "60s" + log_stream_name = "LOG_STREAM_NAME" + mode = "EC2" + region = "us-west-2" + region_type = "ACJ" + role_arn = "log_role_arn_value_test" \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.json b/translator/tocwconfig/sampleConfig/compass_linux_config.json new file mode 100755 index 0000000000..d7d1ba059f --- /dev/null +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.json @@ -0,0 +1,48 @@ +{ + "agent": { + "metrics_collection_interval": 10, + "logfile": "/tmp/fake/log/hotdog.log", + "internal": true, + "debug": true, + "quiet": true, + "aws_sdk_log_level": "LogDebug", + "user_agent": "CUSTOM USER AGENT VALUE", + "credentials": { + "role_arn": "global_role_arn_value" + }, + "region": "us-west-2", + "service.name": "agent-level-service", + "deployment.environment": "agent-level-environment" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": 
"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log", + "log_group_name": "amazon-cloudwatch-agent.log", + "log_stream_name": "amazon-cloudwatch-agent.log", + "timezone": "UTC", + "retention_in_days": 5, + "service.name": "file-level-service", + "deployment.environment": "file-level-environment" + }, + { + "file_path": "/opt/aws/amazon-cloudwatch-agent/logs/test.log", + "log_group_name": "test.log", + "log_stream_name": "test.log", + "timezone": "UTC", + "auto_removal": true + } + ] + } + }, + "log_stream_name": "LOG_STREAM_NAME", + "force_flush_interval": 60, + "credentials": { + "role_arn": "log_role_arn_value_test" + }, + "endpoint_override": "https://logs-fips.us-west-2.amazonaws.com", + "service.name": "log-level-service" + } +} \ No newline at end of file diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index cbf7ffde39..5cfcb8daa1 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -152,6 +152,19 @@ func TestAppSignalsAndNativeKubernetesConfig(t *testing.T) { checkTranslation(t, "appsignals_and_k8s_config", "windows", expectedEnvVars, "") } +func TestCompassConfig(t *testing.T) { + resetContext(t) + + context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) + + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + + checkTranslation(t, "compass_linux_config", "linux", nil, "") + checkTranslation(t, "compass_linux_config", "darwin", nil, "") +} + func TestEmfAndKubernetesConfig(t *testing.T) { resetContext(t) readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") diff --git a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go index b48129cea4..37dc56c85a 100644 --- a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go +++ 
b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go @@ -126,8 +126,12 @@ type ( Pipe bool RetentionInDays int `toml:"retention_in_days"` Timezone string - Tags map[string]string - Filters []fileConfigFilter + //Customer specified service.name and deployment.environment + ServiceName string `toml:"service_name"` + //Customer specified deployment.environment + DeploymentEnvironment string `toml:"deployment_environment"` + Tags map[string]string + Filters []fileConfigFilter } k8sApiServerConfig struct { diff --git a/translator/translate/agent/agent.go b/translator/translate/agent/agent.go index dad8a19392..44a615a06d 100644 --- a/translator/translate/agent/agent.go +++ b/translator/translate/agent/agent.go @@ -25,20 +25,30 @@ func RegisterRule(fieldname string, r translator.Rule) { } type Agent struct { - Interval string - Credentials map[string]interface{} - Region string - RegionType string - Mode string - Internal bool - Role_arn string + Interval string + Credentials map[string]interface{} + Region string + RegionType string + Mode string + Internal bool + Role_arn string + ServiceName string + DeploymentEnvironment string } -var Global_Config Agent = *new(Agent) +var ( + Global_Config Agent = *new(Agent) + deploymentEnvironment DeploymentEnvironment + serviceName ServiceName +) func (a *Agent) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { m := input.(map[string]interface{}) result := map[string]interface{}{} + + //Apply DeploymentEnvironment and ServiceName rules + serviceName.ApplyRule(m[SectionKey]) + deploymentEnvironment.ApplyRule(m[SectionKey]) /* In JSON config file, it represent as "agent" : {//specification config information} To check the specification config entry diff --git a/translator/translate/agent/agent_test.go b/translator/translate/agent/agent_test.go index 4b3f221aed..f1158d4e29 100644 --- a/translator/translate/agent/agent_test.go +++ b/translator/translate/agent/agent_test.go @@ -174,3 +174,21 @@ 
func restoreProxyEnv() { os.Setenv("https_proxy", httpsProxy) os.Setenv("no_proxy", noProxy) } + +func TestAgentServiceAndEnvironmentConfig(t *testing.T) { + agentServiceAndEnvironmentConfig(t, config.OS_TYPE_LINUX) + agentServiceAndEnvironmentConfig(t, config.OS_TYPE_DARWIN) +} + +func agentServiceAndEnvironmentConfig(t *testing.T, osType string) { + a := new(Agent) + translator.SetTargetPlatform(osType) + var input interface{} + err := json.Unmarshal([]byte(`{"agent":{"region": "us-west-2", "service.name": "my-service", "deployment.environment":"test-environment"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + } + _, _ = a.ApplyRule(input) + assert.Equal(t, "my-service", Global_Config.ServiceName) + assert.Equal(t, "test-environment", Global_Config.DeploymentEnvironment) +} diff --git a/translator/translate/agent/ruleDeploymentEnvironment.go b/translator/translate/agent/ruleDeploymentEnvironment.go new file mode 100644 index 0000000000..e638cfd935 --- /dev/null +++ b/translator/translate/agent/ruleDeploymentEnvironment.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package agent + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" +) + +type DeploymentEnvironment struct { +} + +func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("deployment.environment", "", input) + returnKey = "deployment_environment" + // Set global agent deployment environment + Global_Config.DeploymentEnvironment = returnVal.(string) + return +} diff --git a/translator/translate/agent/ruleServiceName.go b/translator/translate/agent/ruleServiceName.go new file mode 100644 index 0000000000..9b34226bf8 --- /dev/null +++ b/translator/translate/agent/ruleServiceName.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package agent + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" +) + +type ServiceName struct { +} + +func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("service.name", "", input) + returnKey = "service_name" + // Set global agent service name + Global_Config.ServiceName = returnVal.(string) + return +} diff --git a/translator/translate/logs/logs.go b/translator/translate/logs/logs.go index b39a022f40..c1412b3667 100644 --- a/translator/translate/logs/logs.go +++ b/translator/translate/logs/logs.go @@ -30,11 +30,17 @@ func RegisterRule(fieldname string, r Rule) { } type Logs struct { - FileStateFolder string - MetadataInfo map[string]string + FileStateFolder string + MetadataInfo map[string]string + ServiceName string + DeploymentEnvironment string } -var GlobalLogConfig = Logs{} +var ( + GlobalLogConfig = Logs{} + serviceName ServiceName + deploymentEnvironment DeploymentEnvironment +) func (l *Logs) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { im := input.(map[string]interface{}) @@ -44,6 +50,10 @@ func (l *Logs) ApplyRule(input interface{}) (returnKey string, returnVal interfa cloudwatchConfig := map[string]interface{}{} GlobalLogConfig.MetadataInfo = util.GetMetadataInfo(util.Ec2MetadataInfoProvider) + //Apply DeploymentEnvironment and ServiceName rules + serviceName.ApplyRule(im[SectionKey]) + deploymentEnvironment.ApplyRule(im[SectionKey]) + //Check if this plugin exist in the input instance //If not, not process if _, ok := im[SectionKey]; !ok { diff --git a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go index e9fdc2830e..5f5e93b115 100644 --- a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go +++ 
b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go @@ -17,6 +17,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" ) func TestFileConfig(t *testing.T) { @@ -30,13 +31,15 @@ func TestFileConfig(t *testing.T) { _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "log_group_name": "group1", - "log_stream_name": "LOG_STREAM_NAME", - "log_group_class": util.StandardLogGroupClass, - "pipe": false, - "retention_in_days": -1, + "file_path": "path1", + "from_beginning": true, + "log_group_name": "group1", + "log_stream_name": "LOG_STREAM_NAME", + "log_group_class": util.StandardLogGroupClass, + "pipe": false, + "retention_in_days": -1, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -51,12 +54,14 @@ func TestFileConfigOverride(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": false, - "log_group_name": "group1", - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", + "file_path": "path1", + "from_beginning": false, + "log_group_name": "group1", + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -78,14 +83,16 @@ func TestTimestampFormat(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "timestamp_layout": []string{"15:04:05 06 Jan _2"}, - "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", - "timezone": "UTC", - "retention_in_days": -1, - "log_group_class": 
"", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, + "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", + "timezone": "UTC", + "retention_in_days": -1, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -105,13 +112,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"15:04:05 06 Jan _2"}, - "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, + "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -124,13 +133,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"1 _2 15:04:05", "01 _2 15:04:05"}, - "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"1 _2 15:04:05", "01 _2 15:04:05"}, + "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -143,13 +154,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": 
[]string{"_2 1 15:04:05", "_2 01 15:04:05"}, - "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"_2 1 15:04:05", "_2 01 15:04:05"}, + "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -162,13 +175,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path4", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"Jan _2 15:04:05"}, - "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path4", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -181,13 +196,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path5", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"Jan _2 15:04:05"}, - "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path5", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -200,13 +217,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path4", - "from_beginning": true, - "pipe": false, - 
"retention_in_days": -1, - "timestamp_layout": []string{"Jan _2 15:04:05"}, - "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path4", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -219,13 +238,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path5", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"Jan _2 15:04:05"}, - "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path5", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"Jan _2 15:04:05"}, + "timestamp_regex": "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -238,13 +259,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"5 _2 1 15:04:05", "5 _2 01 15:04:05"}, - "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"5 _2 1 15:04:05", "5 _2 01 15:04:05"}, + "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, { @@ -257,13 +280,15 @@ func TestTimestampFormatAll(t *testing.T) { ] }`, expected: 
[]interface{}{map[string]interface{}{ - "file_path": "path7", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": []string{"5 _2 01 15:04:05", "5 _2 1 15:04:05"}, - "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", - "log_group_class": "", + "file_path": "path7", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": []string{"5 _2 01 15:04:05", "5 _2 1 15:04:05"}, + "timestamp_regex": "(\\d{1,2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }}, }, } @@ -305,14 +330,16 @@ func TestTimestampFormat_NonZeroPadding(t *testing.T) { expectedLayout := []string{"3:4:5 06 1 _2", "3:4:5 06 01 _2"} expectedRegex := "(\\d{1,2}:\\d{1,2}:\\d{1,2} \\d{2} \\s{0,1}\\d{1,2} \\s{0,1}\\d{1,2})" expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_class": "", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": expectedLayout, - "timestamp_regex": expectedRegex, - "timezone": "UTC", + "file_path": "path1", + "log_group_class": "", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": expectedLayout, + "timestamp_regex": expectedRegex, + "timezone": "UTC", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -351,14 +378,16 @@ func TestTimestampFormat_SpecialCharacters(t *testing.T) { expectedLayout := []string{"^.*?|[({15:04:05 06 Jan _2})]$"} expectedRegex := "(\\^\\.\\*\\?\\|\\[\\(\\{\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2}\\}\\)\\]\\$)" expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_class": "", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": expectedLayout, - "timestamp_regex": expectedRegex, - "timezone": "UTC", + "file_path": "path1", + 
"log_group_class": "", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": expectedLayout, + "timestamp_regex": expectedRegex, + "timezone": "UTC", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -391,13 +420,15 @@ func TestTimestampFormat_Template(t *testing.T) { expectedLayout := []string{"Jan _2 15:04:05"} expectedRegex := "(\\w{3} \\s{0,1}\\d{1,2} \\d{2}:\\d{2}:\\d{2})" expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_class": "", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "timestamp_layout": expectedLayout, - "timestamp_regex": expectedRegex, + "file_path": "path1", + "log_group_class": "", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "timestamp_layout": expectedLayout, + "timestamp_regex": expectedRegex, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -455,6 +486,8 @@ func TestMultiLineStartPattern(t *testing.T) { "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", "timezone": "UTC", "multi_line_start_pattern": "{timestamp_regex}", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -477,15 +510,17 @@ func TestEncoding(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", - "timestamp_layout": []string{"15:04:05 06 Jan _2"}, - "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", - "timezone": "UTC", - "encoding": "gbk", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "timestamp_layout": []string{"15:04:05 06 Jan _2"}, + "timestamp_regex": "(\\d{2}:\\d{2}:\\d{2} \\d{2} \\w{3} \\s{0,1}\\d{1,2})", + "timezone": "UTC", + 
"encoding": "gbk", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -507,11 +542,13 @@ func TestEncoding_Invalid(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "log_group_class": "", - "retention_in_days": -1, + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "log_group_class": "", + "retention_in_days": -1, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) assert.False(t, translator.IsTranslateSuccess()) @@ -535,12 +572,14 @@ func TestAutoRemoval(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", - "auto_removal": true, + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "auto_removal": true, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -557,12 +596,14 @@ func TestAutoRemoval(t *testing.T) { } _, val = f.ApplyRule(input) expectVal = []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "auto_removal": false, - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "auto_removal": false, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -578,11 +619,13 @@ func TestAutoRemoval(t *testing.T) { } _, val = f.ApplyRule(input) expectVal = []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + 
"retention_in_days": -1, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -651,14 +694,16 @@ func TestPublishMultiLogs_WithBlackList(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", - "blacklist": "^agent.log", - "publish_multi_logs": true, - "timezone": "UTC", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "blacklist": "^agent.log", + "publish_multi_logs": true, + "timezone": "UTC", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -676,13 +721,15 @@ func TestPublishMultiLogs_WithBlackList(t *testing.T) { } _, val = f.ApplyRule(input) expectVal = []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "publish_multi_logs": false, - "timezone": "UTC", - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "publish_multi_logs": false, + "timezone": "UTC", + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) @@ -698,11 +745,13 @@ func TestPublishMultiLogs_WithBlackList(t *testing.T) { } _, val = f.ApplyRule(input) expectVal = []interface{}{map[string]interface{}{ - "file_path": "path1", - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", + "file_path": "path1", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -723,10 +772,12 @@ func TestLogFilters(t *testing.T) { assert.Nil(t, e) _, val := f.ApplyRule(input) expectVal := 
[]interface{}{map[string]interface{}{ - "from_beginning": true, - "pipe": false, - "retention_in_days": -1, - "log_group_class": "", + "from_beginning": true, + "pipe": false, + "retention_in_days": -1, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", "filters": []interface{}{ map[string]interface{}{ "type": "include", @@ -763,19 +814,23 @@ func TestRetentionDifferentLogGroups(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test2", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test2", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -802,19 +857,23 @@ func TestDuplicateRetention(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test1", + 
"pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -842,19 +901,23 @@ func TestConflictingRetention(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 5, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 5, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, "Under path : /logs/logs_collected/files/collect_list/ | Error : Different retention_in_days values can't be set for the same log group: test1", translator.ErrorMessages[len(translator.ErrorMessages)-1]) assert.Equal(t, expectVal, val) @@ -882,19 +945,23 @@ func TestDifferentLogGroupClasses(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test2", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + "file_path": "path1", + "log_group_name": "test2", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": "", + 
"file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": "", + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -923,19 +990,23 @@ func TestDuplicateLogGroupClass(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": util.StandardLogGroupClass, + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": util.StandardLogGroupClass, + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, expectVal, val) } @@ -967,20 +1038,97 @@ func TestConflictingLogGroupClass(t *testing.T) { } _, val := f.ApplyRule(input) expectVal := []interface{}{map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": util.StandardLogGroupClass, + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "service_name": "", + "deployment_environment": "", }, map[string]interface{}{ - "file_path": "path1", - "log_group_name": "test1", - "pipe": false, - "retention_in_days": 3, - "from_beginning": true, - "log_group_class": 
util.InfrequentAccessLogGroupClass, + "file_path": "path1", + "log_group_name": "test1", + "pipe": false, + "retention_in_days": 3, + "from_beginning": true, + "log_group_class": util.InfrequentAccessLogGroupClass, + "service_name": "", + "deployment_environment": "", }} assert.Equal(t, "Under path : /logs/logs_collected/files/collect_list/ | Error : Different log_group_class values can't be set for the same log group: test1", translator.ErrorMessages[len(translator.ErrorMessages)-1]) assert.Equal(t, expectVal, val) } + +func TestServiceAndEnvironment(t *testing.T) { + logs.GlobalLogConfig.DeploymentEnvironment = "ec2:default" + + f := new(FileConfig) + var input interface{} + e := json.Unmarshal([]byte(`{ + "collect_list": [ + { + "service.name": "my-service1", + "deployment.environment": "ec2:test-deployment-environment", + "file_path": "path1", + "log_group_name": "group1", + "log_stream_name": "stream", + "log_group_class": "standard" + }, + { + "service.name": "my-service2", + "file_path": "path2", + "log_group_name": "group2", + "log_stream_name": "stream", + "log_group_class": "standard" + }, + { + "file_path": "path3", + "log_group_name": "group3", + "log_stream_name": "stream", + "log_group_class": "standard" + } + ] + }`), &input) + if e != nil { + assert.Fail(t, e.Error()) + } + _, val := f.ApplyRule(input) + + expectVal := []interface{}{ + map[string]interface{}{ + "file_path": "path1", + "log_group_name": "group1", + "log_stream_name": "stream", + "service_name": "my-service1", + "deployment_environment": "ec2:test-deployment-environment", + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "pipe": false, + "retention_in_days": -1, + }, + map[string]interface{}{ + "file_path": "path2", + "log_group_name": "group2", + "log_stream_name": "stream", + "service_name": "my-service2", + "deployment_environment": "ec2:default", + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "pipe": false, + 
"retention_in_days": -1, + }, + map[string]interface{}{ + "file_path": "path3", + "log_group_name": "group3", + "log_stream_name": "stream", + "from_beginning": true, + "log_group_class": util.StandardLogGroupClass, + "pipe": false, + "retention_in_days": -1, + "service_name": "", + "deployment_environment": "ec2:default", + }, + } + assert.Equal(t, expectVal, val) +} diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleDeploymentEnvironment.go b/translator/translate/logs/logs_collected/files/collect_list/ruleDeploymentEnvironment.go new file mode 100644 index 0000000000..6608add89b --- /dev/null +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleDeploymentEnvironment.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package collect_list + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" +) + +type DeploymentEnvironment struct { +} + +func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("deployment.environment", "", input) + returnKey = "deployment_environment" + + if returnVal == "" { + returnVal = logs.GlobalLogConfig.DeploymentEnvironment + } + + return +} + +func init() { + f := new(DeploymentEnvironment) + r := []Rule{f} + RegisterRule("deployment.environment", r) +} diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleServiceName.go b/translator/translate/logs/logs_collected/files/collect_list/ruleServiceName.go new file mode 100644 index 0000000000..cf9a3b735c --- /dev/null +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleServiceName.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package collect_list + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" +) + +type ServiceName struct { +} + +func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("service.name", "", input) + returnKey = "service_name" + + if returnVal == "" { + returnVal = logs.GlobalLogConfig.ServiceName + } + + return +} + +func init() { + f := new(ServiceName) + r := []Rule{f} + RegisterRule("service.name", r) +} diff --git a/translator/translate/logs/logs_test.go b/translator/translate/logs/logs_test.go index 923386164f..fe1dc0583d 100644 --- a/translator/translate/logs/logs_test.go +++ b/translator/translate/logs/logs_test.go @@ -200,3 +200,42 @@ func TestLogs_EndpointOverride(t *testing.T) { ctx.SetMode(config.ModeEC2) //reset back to default mode } + +func TestLogs_ServiceAndEnvironment(t *testing.T) { + l := new(Logs) + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" + + context.ResetContext() + + var input interface{} + err := json.Unmarshal([]byte(`{"logs":{"service.name": "my-service", + "deployment.environment": "ec2:group","log_stream_name":"LOG_STREAM_NAME"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + } + + _, _ = l.ApplyRule(input) + assert.Equal(t, "my-service", GlobalLogConfig.ServiceName) + assert.Equal(t, "ec2:group", GlobalLogConfig.DeploymentEnvironment) +} + +func TestLogs_ServiceAndEnvironmentMissing(t *testing.T) { + l := new(Logs) + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" + agent.Global_Config.DeploymentEnvironment = "ec2:group" + agent.Global_Config.ServiceName = "my-service" + + context.ResetContext() + + var input interface{} + err := json.Unmarshal([]byte(`{"logs":{"log_stream_name":"LOG_STREAM_NAME"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + 
} + + _, _ = l.ApplyRule(input) + assert.Equal(t, "my-service", GlobalLogConfig.ServiceName) + assert.Equal(t, "ec2:group", GlobalLogConfig.DeploymentEnvironment) +} diff --git a/translator/translate/logs/ruleDeploymentEnvironment.go b/translator/translate/logs/ruleDeploymentEnvironment.go new file mode 100644 index 0000000000..8f0c821fff --- /dev/null +++ b/translator/translate/logs/ruleDeploymentEnvironment.go @@ -0,0 +1,25 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package logs + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" +) + +type DeploymentEnvironment struct { +} + +func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("deployment.environment", "", input) + returnKey = "deployment_environment" + + if returnVal == "" { + returnVal = agent.Global_Config.DeploymentEnvironment + } + // Set global log deployment environment + GlobalLogConfig.DeploymentEnvironment = returnVal.(string) + + return +} diff --git a/translator/translate/logs/ruleServiceName.go b/translator/translate/logs/ruleServiceName.go new file mode 100644 index 0000000000..8e8f183aae --- /dev/null +++ b/translator/translate/logs/ruleServiceName.go @@ -0,0 +1,25 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package logs + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" +) + +type ServiceName struct { +} + +func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("service.name", "", input) + returnKey = "service_name" + + if returnVal == "" { + returnVal = agent.Global_Config.ServiceName + } + // Set global log service name + GlobalLogConfig.ServiceName = returnVal.(string) + + return +} From 40138593bea9fdbadc1e0497726ac325e4cc0d56 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Thu, 20 Jun 2024 15:44:34 -0400 Subject: [PATCH 28/55] Add cross account security check by verifying account ID (#727) --- internal/resourcestore/resourcestore.go | 70 ++++++++++++++++--- internal/resourcestore/resourcestore_test.go | 60 ++++++++++++++++ .../outputs/cloudwatchlogs/cloudwatchlogs.go | 5 ++ 3 files changed, 126 insertions(+), 9 deletions(-) diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 9a6d48dbcd..3627c1c2db 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -9,9 +9,12 @@ import ( "sync" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -59,6 +62,14 @@ type ResourceStore struct { // serviceprovider stores information about possible service names // that we can attach to the resource ID serviceprovider serviceprovider + + // nativeCredential stores the 
credential config for agent's native + // component such as LogAgent + nativeCredential client.ConfigProvider + + metadataprovider ec2metadataprovider.MetadataProvider + + stsClient stsiface.STSAPI } func GetResourceStore() *ResourceStore { @@ -72,18 +83,19 @@ func initResourceStore() *ResourceStore { // Get IMDS client and EC2 API client which requires region for authentication // These will be passed down to any object that requires access to IMDS or EC2 // API client so we have single source of truth for credential - rs := &ResourceStore{} - metadataProvider := getMetaDataProvider() + rs := &ResourceStore{ + metadataprovider: getMetaDataProvider(), + } if translatorCtx.CurrentContext().Mode() != "" { rs.mode = translatorCtx.CurrentContext().Mode() log.Printf("I! resourcestore: ResourceStore mode is %s ", rs.mode) } switch rs.mode { case config.ModeEC2: - rs.ec2Info = *newEC2Info(metadataProvider, getEC2Provider) + rs.ec2Info = *newEC2Info(rs.metadataprovider, getEC2Provider) go rs.ec2Info.initEc2Info() } - rs.serviceprovider = *newServiceProvider(metadataProvider, getEC2Provider) + rs.serviceprovider = *newServiceProvider(rs.metadataprovider, getEC2Provider) rs.serviceprovider.logFiles = map[string]ServiceAttribute{} go rs.serviceprovider.startServiceProvider() return rs @@ -101,13 +113,24 @@ func (r *ResourceStore) EKSInfo() eksInfo { return r.eksInfo } +func (r *ResourceStore) SetNativeCredential(client client.ConfigProvider) { + r.nativeCredential = client +} + +func (r *ResourceStore) NativeCredentialExists() bool { + return r.nativeCredential != nil +} + func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { - return &cloudwatchlogs.Resource{ - AttributeMaps: []map[string]*string{ - r.createAttributeMaps(), - }, - KeyAttributes: r.createServiceKeyAttributes(), + if r.shouldReturnRID() { + return &cloudwatchlogs.Resource{ + AttributeMaps: []map[string]*string{ + r.createAttributeMaps(), + }, + KeyAttributes: 
r.createServiceKeyAttributes(), + } } + return nil } // AddServiceAttrEntryToResourceStore adds an entry to the resource store for the provided file -> serviceName, environmentName key-value pair @@ -143,6 +166,35 @@ func (r *ResourceStore) createServiceKeyAttributes() *cloudwatchlogs.KeyAttribut return serviceKeyAttr } +// shouldReturnRID checks if the account ID for the instance is +// matching the account ID when assuming role for the current credential. +func (r *ResourceStore) shouldReturnRID() bool { + if r.nativeCredential == nil { + log.Printf("D! resourceStore: there is no credential stored for cross-account checks\n") + return false + } + doc, err := r.metadataprovider.Get(context.Background()) + if err != nil { + log.Printf("D! resourceStore: an error occurred when getting instance document for cross-account checks. Reason: %v\n", err) + return false + } + instanceAccountID := doc.AccountID + if r.stsClient == nil { + r.stsClient = sts.New( + r.nativeCredential, + &aws.Config{ + LogLevel: configaws.SDKLogLevel(), + Logger: configaws.SDKLogger{}, + }) + } + assumedRoleIdentity, err := r.stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err != nil { + log.Printf("D! resourceStore: an error occurred when calling STS GetCallerIdentity for cross-account checks. 
Reason: %v\n", err) + return false + } + return instanceAccountID == *assumedRoleIdentity.Account +} + func getMetaDataProvider() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index d8f916ec9f..600cf90dd3 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -10,8 +10,12 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -23,6 +27,14 @@ type mockMetadataProvider struct { TagValue string } +type mockSTSClient struct { + stsiface.STSAPI +} + +func (ms *mockSTSClient) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) { + return &sts.GetCallerIdentityOutput{Account: aws.String("123456789")}, nil +} + func (m *mockMetadataProvider) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { if m.InstanceIdentityDocument != nil { return *m.InstanceIdentityDocument, nil @@ -273,6 +285,54 @@ func TestResourceStore_createServiceKeyAttributes(t *testing.T) { } } +func TestResourceStore_shouldReturnRID(t *testing.T) { + type fields struct { + metadataprovider ec2metadataprovider.MetadataProvider + stsClient stsiface.STSAPI + nativeCredential client.ConfigProvider + } + tests := []struct { + name string + fields fields + want bool + }{ + { + name: "HappyPath_AccountIDMatches", + fields: fields{ + metadataprovider: 
&mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + AccountID: "123456789"}, + }, + stsClient: &mockSTSClient{}, + nativeCredential: &session.Session{}, + }, + want: true, + }, + { + name: "HappyPath_AccountIDMismatches", + fields: fields{ + metadataprovider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + AccountID: "987654321"}, + }, + stsClient: &mockSTSClient{}, + nativeCredential: &session.Session{}, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ResourceStore{ + metadataprovider: tt.fields.metadataprovider, + stsClient: tt.fields.stsClient, + nativeCredential: tt.fields.nativeCredential, + } + assert.Equalf(t, tt.want, r.shouldReturnRID(), "shouldReturnRID()") + }) + } +} + func dereferenceMap(input map[string]*string) map[string]string { result := make(map[string]string) for k, v := range input { diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index d7923e6e6f..c8da16084a 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -26,6 +26,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" "github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal" + "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/tool/util" @@ -140,6 +141,10 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { } logThrottleRetryer := retryer.NewLogThrottleRetryer(c.Log) + resourcestore := resourcestore.GetResourceStore() + if !resourcestore.NativeCredentialExists() { + resourcestore.SetNativeCredential(credentialConfig.Credentials()) + } client := 
cloudwatchlogs.New( credentialConfig.Credentials(), &aws.Config{ From 73d4aa564117616ca6ddd4a7827897ee9ddb2e19 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:00:41 -0400 Subject: [PATCH 29/55] Add refresh logic for service name retrievals (#724) --- internal/resourcestore/serviceprovider.go | 100 ++++++++++++++---- .../resourcestore/serviceprovider_test.go | 70 +++++++++++- 2 files changed, 148 insertions(+), 22 deletions(-) diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go index c4c3fc6c07..79b18c95ed 100644 --- a/internal/resourcestore/serviceprovider.go +++ b/internal/resourcestore/serviceprovider.go @@ -6,8 +6,11 @@ package resourcestore import ( "context" "errors" + "fmt" "log" + "math/rand" "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" @@ -15,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) const ( @@ -24,6 +28,8 @@ const ( APP = "app" ClientIamRole = "ClientIamRole" ResourceTags = "ResourceTags" + jitterMax = 180 + jitterMin = 60 ) var ( @@ -46,6 +52,7 @@ type serviceprovider struct { ec2Provider ec2ProviderType iamRole string ec2TagServiceName string + ctx context.Context // logFiles is a variable reserved for communication between OTEL components and LogAgent // in order to achieve process correlations where the key is the log file path and the value @@ -56,23 +63,12 @@ type serviceprovider struct { } func (s *serviceprovider) startServiceProvider() { - go func() { - err := s.getIAMRole() - if err != nil { - log.Println("D! serviceprovider failed to get service name through IAM role in service provider: ", err) - } - }() - region, err := getRegion(s.metadataProvider) + err := s.getEC2Client() if err != nil { - log.Println("D! 
serviceprovider failed to get region: ", err) + go refreshLoop(s.ctx, s.getEC2Client, true) } - go func() { - s.ec2API = s.ec2Provider(region) - err := s.getEC2TagServiceName() - if err != nil { - log.Println("D! serviceprovider failed to get service name through EC2 tags in service provider: ", err) - } - }() + go refreshLoop(s.ctx, s.getIAMRole, false) + go refreshLoop(s.ctx, s.getEC2TagServiceName, false) } // ServiceAttribute function gets the relevant service attributes @@ -100,27 +96,29 @@ func (s *serviceprovider) ServiceAttribute() ServiceAttribute { func (s *serviceprovider) getIAMRole() error { iamRole, err := s.metadataProvider.InstanceProfileIAMRole() if err != nil { - log.Println("D! resourceMap: Unable to retrieve EC2 Metadata. This feature must only be used on an EC2 instance.") - return err + return fmt.Errorf("failed to get instance profile role: %s", err) } iamRoleArn, err := arn.Parse(iamRole) if err != nil { - log.Println("D! resourceMap: Unable to parse IAM Role Arn. " + err.Error()) + return fmt.Errorf("failed to parse IAM Role Arn: %s", err) } iamRoleResource := iamRoleArn.Resource if strings.HasPrefix(iamRoleResource, INSTANCE_PROFILE) { roleName := strings.TrimPrefix(iamRoleResource, INSTANCE_PROFILE) s.iamRole = roleName } else { - log.Println("D! resourceMap: IAM Role resource does not follow the expected pattern. Should be instance-profile/") + return fmt.Errorf("IAM Role resource does not follow the expected pattern. 
Should be instance-profile/") } return nil } func (s *serviceprovider) getEC2TagServiceName() error { + if s.ec2API == nil { + return fmt.Errorf("can't get EC2 tag since client is not set up yet ") + } serviceTagFilters, err := s.getEC2TagFilters() if err != nil { - return err + return fmt.Errorf("failed to get service name from EC2 tag: %s", err) } currentTagPriority := -1 for { @@ -149,6 +147,18 @@ func (s *serviceprovider) getEC2TagServiceName() error { return nil } +func (s *serviceprovider) getEC2Client() error { + if s.ec2API != nil { + return nil + } + region, err := getRegion(s.metadataProvider) + if err != nil { + return fmt.Errorf("failed to get EC2 client: %s", err) + } + s.ec2API = s.ec2Provider(region) + return nil +} + func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { instanceDocument, err := s.metadataProvider.Get(context.Background()) if err != nil { @@ -176,5 +186,55 @@ func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, p return &serviceprovider{ metadataProvider: metadataProvider, ec2Provider: providerType, + ctx: context.Background(), + } +} + +func refreshLoop(ctx context.Context, updateFunc func() error, oneTime bool) { + // Offset retry by 1 so we can start with 1 minute wait time + // instead of immediately retrying + retry := 1 + for { + err := updateFunc() + if err == nil && oneTime { + return + } + + waitDuration := calculateWaitTime(retry, err) + wait := time.NewTimer(waitDuration) + select { + case <-ctx.Done(): + wait.Stop() + return + case <-wait.C: + } + + if retry > 1 { + log.Printf("D! serviceprovider: attribute retrieval retry count: %d", retry-1) + } + + if err != nil { + retry++ + log.Printf("D! serviceprovider: there was an error when retrieving service attribute. Reason: %s", err) + } else { + retry = 1 + } + + } +} + +// calculateWaitTime returns different time based on whether if +// a function call was returned with error. 
If returned with error, +// follow exponential backoff wait time, otherwise, refresh with jitter +func calculateWaitTime(retry int, err error) time.Duration { + var waitDuration time.Duration + if err == nil { + return time.Duration(rand.Intn(jitterMax-jitterMin)+jitterMin) * time.Second + } + if retry < len(ec2tagger.BackoffSleepArray) { + waitDuration = ec2tagger.BackoffSleepArray[retry] + } else { + waitDuration = ec2tagger.BackoffSleepArray[len(ec2tagger.BackoffSleepArray)-1] } + return waitDuration } diff --git a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go index dd8d485c6c..df21357d79 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ b/internal/resourcestore/serviceprovider_test.go @@ -4,6 +4,7 @@ package resourcestore import ( + "context" "testing" "time" @@ -61,14 +62,19 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + testCtx, cancel := context.WithCancel(context.TODO()) s := serviceprovider{ metadataProvider: tt.args.metadataProvider, ec2Provider: func(s string) ec2iface.EC2API { return tt.args.ec2Client }, + ec2API: tt.args.ec2Client, + ctx: testCtx, } - s.startServiceProvider() - time.Sleep(time.Second) + go s.startServiceProvider() + time.Sleep(3 * time.Second) + cancel() + assert.Equal(t, tt.wantIAM, s.iamRole) assert.Equal(t, tt.wantTag, s.ec2TagServiceName) }) @@ -111,6 +117,7 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { s := &serviceprovider{ iamRole: tt.fields.iamRole, ec2TagServiceName: tt.fields.ec2TagServiceName, + ctx: context.Background(), } assert.Equalf(t, tt.want, s.ServiceAttribute(), "ServiceAttribute()") }) @@ -229,3 +236,62 @@ func Test_serviceprovider_getEC2TagServiceName(t *testing.T) { }) } } + +func Test_refreshLoop(t *testing.T) { + type fields struct { + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + iamRole string + 
ec2TagServiceName string + refreshInterval time.Duration + oneTime bool + } + type expectedInfo struct { + iamRole string + ec2TagServiceName string + } + tests := []struct { + name string + fields fields + expectedInfo expectedInfo + }{ + { + name: "HappyPath_CorrectRefresh", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: &mockServiceNameEC2Client{}, + iamRole: "original-role", + ec2TagServiceName: "original-tag-name", + refreshInterval: time.Millisecond, + }, + expectedInfo: expectedInfo{ + iamRole: "TestRole", + ec2TagServiceName: "test-service", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testCtx, cancel := context.WithCancel(context.TODO()) + s := &serviceprovider{ + metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + ec2Provider: func(s string) ec2iface.EC2API { + return tt.fields.ec2API + }, + iamRole: tt.fields.iamRole, + ec2TagServiceName: tt.fields.ec2TagServiceName, + ctx: testCtx, + } + go refreshLoop(testCtx, s.getEC2TagServiceName, tt.fields.oneTime) + go refreshLoop(testCtx, s.getIAMRole, tt.fields.oneTime) + time.Sleep(time.Second) + cancel() + assert.Equal(t, tt.expectedInfo.iamRole, s.iamRole) + assert.Equal(t, tt.expectedInfo.ec2TagServiceName, s.ec2TagServiceName) + }) + } +} From ccffe7ff57ee2b947acd00982a28ae99545db53f Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Wed, 26 Jun 2024 09:05:48 -0400 Subject: [PATCH 30/55] [Compass] Update RID creation to use CWA config keys if defined (#731) --- internal/resourcestore/resourcestore.go | 17 ++++------- internal/resourcestore/resourcestore_test.go | 8 +++--- internal/resourcestore/serviceprovider.go | 13 ++++++++- .../resourcestore/serviceprovider_test.go | 28 ++++++++++++++++--- plugins/inputs/logfile/fileconfig.go | 2 +- plugins/inputs/logfile/logfile.go | 2 -- plugins/inputs/logfile/logfile_test.go | 5 
---- translator/config/schema.json | 12 ++++---- .../tomlConfigTemplate/tomlConfig.go | 2 +- translator/translate/agent/agent.go | 7 +---- .../agent/ruleDeploymentEnvironment.go | 14 ++++++---- translator/translate/agent/ruleServiceName.go | 11 ++++++-- translator/translate/logs/logs.go | 2 +- .../logs/ruleDeploymentEnvironment.go | 9 +++--- translator/translate/logs/ruleServiceName.go | 9 +++--- 15 files changed, 81 insertions(+), 60 deletions(-) diff --git a/internal/resourcestore/resourcestore.go b/internal/resourcestore/resourcestore.go index 3627c1c2db..20e74e7350 100644 --- a/internal/resourcestore/resourcestore.go +++ b/internal/resourcestore/resourcestore.go @@ -96,7 +96,6 @@ func initResourceStore() *ResourceStore { go rs.ec2Info.initEc2Info() } rs.serviceprovider = *newServiceProvider(rs.metadataprovider, getEC2Provider) - rs.serviceprovider.logFiles = map[string]ServiceAttribute{} go rs.serviceprovider.startServiceProvider() return rs } @@ -127,23 +126,19 @@ func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) * AttributeMaps: []map[string]*string{ r.createAttributeMaps(), }, - KeyAttributes: r.createServiceKeyAttributes(), + KeyAttributes: r.createServiceKeyAttributes(fileGlobPath), } } return nil } // AddServiceAttrEntryToResourceStore adds an entry to the resource store for the provided file -> serviceName, environmentName key-value pair -func (r *ResourceStore) AddServiceAttrEntryToResourceStore(key string, serviceName string, environmentName string) { - r.serviceprovider.logFiles[key] = ServiceAttribute{ServiceName: serviceName, Environment: environmentName} -} - -func (r *ResourceStore) LogFiles() map[string]ServiceAttribute { - return r.serviceprovider.logFiles +func (r *ResourceStore) AddServiceAttrEntryToResourceStore(fileGlob string, serviceName string, environmentName string) { + r.serviceprovider.logFiles[fileGlob] = ServiceAttribute{ServiceName: serviceName, ServiceNameSource: AgentConfig, Environment: 
environmentName} } func (r *ResourceStore) createAttributeMaps() map[string]*string { - serviceAttr := r.serviceprovider.ServiceAttribute() + serviceAttr := r.serviceprovider.ServiceAttribute("") attributeMap := make(map[string]*string) addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) @@ -152,8 +147,8 @@ func (r *ResourceStore) createAttributeMaps() map[string]*string { return attributeMap } -func (r *ResourceStore) createServiceKeyAttributes() *cloudwatchlogs.KeyAttributes { - serviceAttr := r.serviceprovider.ServiceAttribute() +func (r *ResourceStore) createServiceKeyAttributes(fileGlob string) *cloudwatchlogs.KeyAttributes { + serviceAttr := r.serviceprovider.ServiceAttribute(fileGlob) serviceKeyAttr := &cloudwatchlogs.KeyAttributes{ Type: aws.String(Service), } diff --git a/internal/resourcestore/resourcestore_test.go b/internal/resourcestore/resourcestore_test.go index 600cf90dd3..1d64ab7a0d 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/internal/resourcestore/resourcestore_test.go @@ -147,7 +147,7 @@ func TestResourceStore_LogFiles(t *testing.T) { }, } if got := r.serviceprovider.logFiles; !reflect.DeepEqual(got, tt.want) { - t.Errorf("LogFiles() = %v, want %v", got, tt.want) + t.Errorf("logFiles() = %v, want %v", got, tt.want) } }) } @@ -280,7 +280,7 @@ func TestResourceStore_createServiceKeyAttributes(t *testing.T) { r := &ResourceStore{ serviceprovider: tt.fields.serviceprovider, } - assert.Equalf(t, tt.want, r.createServiceKeyAttributes(), "createServiceKeyAttributes()") + assert.Equalf(t, tt.want, r.createServiceKeyAttributes(""), "createServiceKeyAttributes()") }) } } @@ -353,9 +353,9 @@ func TestAddServiceKeyAttributeToLogFilesMap(t *testing.T) { expected := &ResourceStore{ serviceprovider: serviceprovider{ iamRole: "test-role", - logFiles: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "test", Environment: "ec2:test"}}, + logFiles: 
map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "test", ServiceNameSource: AgentConfig, Environment: "ec2:test"}}, }, } - assert.Equal(t, true, reflect.DeepEqual(rs.LogFiles(), expected.serviceprovider.logFiles)) + assert.Equal(t, true, reflect.DeepEqual(rs.serviceprovider.logFiles, expected.serviceprovider.logFiles)) } diff --git a/internal/resourcestore/serviceprovider.go b/internal/resourcestore/serviceprovider.go index 79b18c95ed..4563c4bc53 100644 --- a/internal/resourcestore/serviceprovider.go +++ b/internal/resourcestore/serviceprovider.go @@ -30,6 +30,7 @@ const ( ResourceTags = "ResourceTags" jitterMax = 180 jitterMin = 60 + AgentConfig = "AgentConfig" ) var ( @@ -78,13 +79,22 @@ func (s *serviceprovider) startServiceProvider() { // 3. Process correlation // 4. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app // 5. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) -func (s *serviceprovider) ServiceAttribute() ServiceAttribute { +func (s *serviceprovider) ServiceAttribute(fileGlob string) ServiceAttribute { serviceAttr := ServiceAttribute{} + // CWA config + if val, ok := s.logFiles[fileGlob]; ok { + serviceAttr.ServiceName = val.ServiceName + serviceAttr.ServiceNameSource = val.ServiceNameSource + serviceAttr.Environment = val.Environment + return serviceAttr + } + // Instance Tags if s.ec2TagServiceName != "" { serviceAttr.ServiceName = s.ec2TagServiceName serviceAttr.ServiceNameSource = ResourceTags return serviceAttr } + //IAM Role if s.iamRole != "" { serviceAttr.ServiceName = s.iamRole serviceAttr.ServiceNameSource = ClientIamRole @@ -187,6 +197,7 @@ func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, p metadataProvider: metadataProvider, ec2Provider: providerType, ctx: context.Background(), + logFiles: map[string]ServiceAttribute{}, } } diff --git 
a/internal/resourcestore/serviceprovider_test.go b/internal/resourcestore/serviceprovider_test.go index df21357d79..dd5a00c71e 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ b/internal/resourcestore/serviceprovider_test.go @@ -85,11 +85,13 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { type fields struct { iamRole string ec2TagServiceName string + logFiles map[string]ServiceAttribute } tests := []struct { - name string - fields fields - want ServiceAttribute + name string + fields fields + serviceProvider *serviceprovider + want ServiceAttribute }{ { name: "HappyPath_IAMRole", @@ -111,15 +113,33 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { ServiceNameSource: ResourceTags, }, }, + { + name: "HappyPath_AgentConfig", + fields: fields{ + logFiles: map[string]ServiceAttribute{ + "test-file": { + ServiceName: "test-service", + ServiceNameSource: AgentConfig, + Environment: "test-environment", + }, + }, + }, + want: ServiceAttribute{ + ServiceName: "test-service", + ServiceNameSource: AgentConfig, + Environment: "test-environment", + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &serviceprovider{ iamRole: tt.fields.iamRole, ec2TagServiceName: tt.fields.ec2TagServiceName, + logFiles: tt.fields.logFiles, ctx: context.Background(), } - assert.Equalf(t, tt.want, s.ServiceAttribute(), "ServiceAttribute()") + assert.Equalf(t, tt.want, s.ServiceAttribute("test-file"), "ServiceAttribute()") }) } } diff --git a/plugins/inputs/logfile/fileconfig.go b/plugins/inputs/logfile/fileconfig.go index 1ef082b809..1f41abe033 100644 --- a/plugins/inputs/logfile/fileconfig.go +++ b/plugins/inputs/logfile/fileconfig.go @@ -83,7 +83,7 @@ type FileConfig struct { Filters []*LogFilter `toml:"filters"` - //Customer specified service.name and deployment.environment + //Customer specified service.name ServiceName string `toml:"service_name"` //Customer specified deployment.environment Environment string 
`toml:"deployment_environment"` diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index a07030b809..de8fc612fe 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -162,8 +162,6 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { //Add file -> {serviceName, deploymentEnvironment} mapping to resource store rs.AddServiceAttrEntryToResourceStore(fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) - t.Log.Debugf("Created entry for file=%s with serviceName=%s, environment=%s in resource store", fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) - targetFiles, err := t.getTargetFiles(fileconfig) if err != nil { t.Log.Errorf("Failed to find target files for file config %v, with error: %v", fileconfig.FilePath, err) diff --git a/plugins/inputs/logfile/logfile_test.go b/plugins/inputs/logfile/logfile_test.go index fb680bfd71..b4239cb225 100644 --- a/plugins/inputs/logfile/logfile_test.go +++ b/plugins/inputs/logfile/logfile_test.go @@ -18,7 +18,6 @@ import ( "golang.org/x/text/encoding/simplifiedchinese" "golang.org/x/text/transform" - "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" ) @@ -70,10 +69,6 @@ func TestLogs(t *testing.T) { t.Fatalf("%v log src was returned when only 1 should be available", len(lsrcs)) } - rs := resourcestore.GetResourceStore() - assert.Equal(t, rs.LogFiles()[filename].ServiceName, "test-service-name") - assert.Equal(t, rs.LogFiles()[filename].Environment, "ec2:test-environment") - done := make(chan struct{}) lsrc := lsrcs[0] lsrc.SetOutput(func(e logs.LogEvent) { diff --git a/translator/config/schema.json b/translator/config/schema.json index 4bdcf8e91f..1d46f82232 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -56,12 +56,12 @@ "service.name": { "type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 }, "deployment.environment": { 
"type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 } }, "additionalProperties": true @@ -789,12 +789,12 @@ "service.name": { "type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 }, "deployment.environment": { "type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 } }, "additionalProperties": false, @@ -879,12 +879,12 @@ "service.name": { "type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 }, "deployment.environment": { "type": "string", "minLength": 1, - "maxLength": 4096 + "maxLength": 512 } }, "required": [ diff --git a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go index 37dc56c85a..2a26ad9bb2 100644 --- a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go +++ b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go @@ -126,7 +126,7 @@ type ( Pipe bool RetentionInDays int `toml:"retention_in_days"` Timezone string - //Customer specified service.name and deployment.environment + //Customer specified service.name ServiceName string `toml:"service_name"` //Customer specified deployment.environment DeploymentEnvironment string `toml:"deployment_environment"` diff --git a/translator/translate/agent/agent.go b/translator/translate/agent/agent.go index 44a615a06d..fe196352af 100644 --- a/translator/translate/agent/agent.go +++ b/translator/translate/agent/agent.go @@ -37,18 +37,13 @@ type Agent struct { } var ( - Global_Config Agent = *new(Agent) - deploymentEnvironment DeploymentEnvironment - serviceName ServiceName + Global_Config Agent = *new(Agent) ) func (a *Agent) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { m := input.(map[string]interface{}) result := map[string]interface{}{} - //Apply DeploymentEnvironment and ServiceName rules - serviceName.ApplyRule(m[SectionKey]) - deploymentEnvironment.ApplyRule(m[SectionKey]) /* In JSON config 
file, it represent as "agent" : {//specification config information} To check the specification config entry diff --git a/translator/translate/agent/ruleDeploymentEnvironment.go b/translator/translate/agent/ruleDeploymentEnvironment.go index e638cfd935..02d7bdfa49 100644 --- a/translator/translate/agent/ruleDeploymentEnvironment.go +++ b/translator/translate/agent/ruleDeploymentEnvironment.go @@ -7,13 +7,17 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator" ) -type DeploymentEnvironment struct { -} +type DeploymentEnvironment struct{} func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase("deployment.environment", "", input) - returnKey = "deployment_environment" + _, result := translator.DefaultCase("deployment.environment", "", input) + // Set global agent deployment environment - Global_Config.DeploymentEnvironment = returnVal.(string) + Global_Config.DeploymentEnvironment = result.(string) return } + +func init() { + f := new(DeploymentEnvironment) + RegisterRule("deployment.environment", f) +} diff --git a/translator/translate/agent/ruleServiceName.go b/translator/translate/agent/ruleServiceName.go index 9b34226bf8..15434bd185 100644 --- a/translator/translate/agent/ruleServiceName.go +++ b/translator/translate/agent/ruleServiceName.go @@ -11,9 +11,14 @@ type ServiceName struct { } func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase("service.name", "", input) - returnKey = "service_name" + _, result := translator.DefaultCase("service.name", "", input) + // Set global agent service name - Global_Config.ServiceName = returnVal.(string) + Global_Config.ServiceName = result.(string) return } + +func init() { + f := new(ServiceName) + RegisterRule("service.name", f) +} diff --git a/translator/translate/logs/logs.go b/translator/translate/logs/logs.go index c1412b3667..e0887b40c5 
100644 --- a/translator/translate/logs/logs.go +++ b/translator/translate/logs/logs.go @@ -50,7 +50,7 @@ func (l *Logs) ApplyRule(input interface{}) (returnKey string, returnVal interfa cloudwatchConfig := map[string]interface{}{} GlobalLogConfig.MetadataInfo = util.GetMetadataInfo(util.Ec2MetadataInfoProvider) - //Apply DeploymentEnvironment and ServiceName rules + //Apply Environment and ServiceName rules serviceName.ApplyRule(im[SectionKey]) deploymentEnvironment.ApplyRule(im[SectionKey]) diff --git a/translator/translate/logs/ruleDeploymentEnvironment.go b/translator/translate/logs/ruleDeploymentEnvironment.go index 8f0c821fff..498bf889d1 100644 --- a/translator/translate/logs/ruleDeploymentEnvironment.go +++ b/translator/translate/logs/ruleDeploymentEnvironment.go @@ -12,14 +12,13 @@ type DeploymentEnvironment struct { } func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase("deployment.environment", "", input) - returnKey = "deployment_environment" + _, result := translator.DefaultCase("deployment.environment", "", input) - if returnVal == "" { - returnVal = agent.Global_Config.DeploymentEnvironment + if result == "" { + result = agent.Global_Config.DeploymentEnvironment } // Set global log deployment environment - GlobalLogConfig.DeploymentEnvironment = returnVal.(string) + GlobalLogConfig.DeploymentEnvironment = result.(string) return } diff --git a/translator/translate/logs/ruleServiceName.go b/translator/translate/logs/ruleServiceName.go index 8e8f183aae..f209f23f17 100644 --- a/translator/translate/logs/ruleServiceName.go +++ b/translator/translate/logs/ruleServiceName.go @@ -12,14 +12,13 @@ type ServiceName struct { } func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase("service.name", "", input) - returnKey = "service_name" + _, result := translator.DefaultCase("service.name", "", 
input) - if returnVal == "" { - returnVal = agent.Global_Config.ServiceName + if result == "" { + result = agent.Global_Config.ServiceName } // Set global log service name - GlobalLogConfig.ServiceName = returnVal.(string) + GlobalLogConfig.ServiceName = result.(string) return } From d2852aa597f68302bfce8649f10e6a9e5d03d2d5 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 28 Jun 2024 10:27:35 -0400 Subject: [PATCH 31/55] Convert resourcestore into an OTEL extension (#732) --- .../amazon-cloudwatch-agent.go | 3 - extension/resourcestore/config.go | 17 +++++ extension/resourcestore/config_test.go | 19 +++++ .../resourcestore/ec2Info.go | 15 ++-- .../resourcestore/ec2Info_test.go | 0 .../resourcestore/extension.go | 69 ++++++++++--------- .../resourcestore/extension_test.go | 43 ++---------- extension/resourcestore/factory.go | 41 +++++++++++ extension/resourcestore/factory_test.go | 26 +++++++ .../resourcestore/serviceprovider.go | 17 ++--- .../resourcestore/serviceprovider_test.go | 18 +++-- go.mod | 2 + go.sum | 5 ++ plugins/inputs/logfile/logfile.go | 7 +- plugins/inputs/logfile/tailersrc.go | 8 ++- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 16 ++--- service/defaultcomponents/components.go | 6 ++ service/defaultcomponents/components_test.go | 11 ++- 18 files changed, 206 insertions(+), 117 deletions(-) create mode 100644 extension/resourcestore/config.go create mode 100644 extension/resourcestore/config_test.go rename {internal => extension}/resourcestore/ec2Info.go (95%) rename {internal => extension}/resourcestore/ec2Info_test.go (100%) rename internal/resourcestore/resourcestore.go => extension/resourcestore/extension.go (80%) rename internal/resourcestore/resourcestore_test.go => extension/resourcestore/extension_test.go (91%) create mode 100644 extension/resourcestore/factory.go create mode 100644 extension/resourcestore/factory_test.go rename {internal => extension}/resourcestore/serviceprovider.go (93%) 
rename {internal => extension}/resourcestore/serviceprovider_test.go (95%) diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index 9dcc40376d..2afac53173 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -37,7 +37,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" - "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/internal/version" cwaLogger "github.com/aws/amazon-cloudwatch-agent/logger" "github.com/aws/amazon-cloudwatch-agent/logs" @@ -356,8 +355,6 @@ func runAgent(ctx context.Context, e := []string{"--config=" + yamlConfigPath + " --feature-gates=exporter.xray.allowDot"} cmd.SetArgs(e) - resourcestore.GetResourceStore() - return cmd.Execute() } diff --git a/extension/resourcestore/config.go b/extension/resourcestore/config.go new file mode 100644 index 0000000000..84a54c37d2 --- /dev/null +++ b/extension/resourcestore/config.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct { + Mode string `mapstructure:"mode"` + Profile string `mapstructure:"profile,omitempty"` + RoleARN string `mapstructure:"role_arn,omitempty"` + Filename string `mapstructure:"shared_credential_file,omitempty"` +} + +var _ component.Config = (*Config)(nil) diff --git a/extension/resourcestore/config_test.go b/extension/resourcestore/config_test.go new file mode 100644 index 0000000000..cf30af680d --- /dev/null +++ b/extension/resourcestore/config_test.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/internal/resourcestore/ec2Info.go b/extension/resourcestore/ec2Info.go similarity index 95% rename from internal/resourcestore/ec2Info.go rename to extension/resourcestore/ec2Info.go index 131f859ad0..f48094e1f1 100644 --- a/internal/resourcestore/ec2Info.go +++ b/extension/resourcestore/ec2Info.go @@ -28,12 +28,11 @@ type ec2Info struct { metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API ec2Provider ec2ProviderType - shutdownC chan bool + done chan struct{} } func (ei *ec2Info) initEc2Info() { log.Println("I! ec2Info: Initializing ec2Info") - ei.shutdownC = make(chan bool) if err := ei.setInstanceIdAndRegion(); err != nil { return } @@ -42,7 +41,6 @@ func (ei *ec2Info) initEc2Info() { return } log.Printf("D! ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) - ei.Shutdown() } func (ei *ec2Info) setInstanceIdAndRegion() error { @@ -52,7 +50,7 @@ func (ei *ec2Info) setInstanceIdAndRegion() error { log.Printf("E! 
ec2Info: Failed to get Instance Id and region through metadata provider: %v", err) wait := time.NewTimer(1 * time.Minute) select { - case <-ei.shutdownC: + case <-ei.done: wait.Stop() return errors.New("ec2Info: shutdownC received") case <-wait.C: @@ -78,7 +76,7 @@ func (ei *ec2Info) setAutoScalingGroup() error { wait := time.NewTimer(waitDuration) select { - case <-ei.shutdownC: + case <-ei.done: wait.Stop() return errors.New("ec2Info: shutdownC received") case <-wait.C: @@ -160,13 +158,10 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error return nil } -func (ei *ec2Info) Shutdown() { - close(ei.shutdownC) -} - -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType) *ec2Info { +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, done chan struct{}) *ec2Info { return &ec2Info{ metadataProvider: metadataProvider, ec2Provider: providerType, + done: done, } } diff --git a/internal/resourcestore/ec2Info_test.go b/extension/resourcestore/ec2Info_test.go similarity index 100% rename from internal/resourcestore/ec2Info_test.go rename to extension/resourcestore/ec2Info_test.go diff --git a/internal/resourcestore/resourcestore.go b/extension/resourcestore/extension.go similarity index 80% rename from internal/resourcestore/resourcestore.go rename to extension/resourcestore/extension.go index 20e74e7350..0799d1748a 100644 --- a/internal/resourcestore/resourcestore.go +++ b/extension/resourcestore/extension.go @@ -5,8 +5,6 @@ package resourcestore import ( "context" - "log" - "sync" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" @@ -15,6 +13,9 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + "go.uber.org/zap" configaws 
"github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -30,11 +31,6 @@ const ( ServieNameSourceKey = "AWS.Internal.ServiceNameSource" ) -var ( - resourceStore *ResourceStore - once sync.Once -) - type ec2ProviderType func(string) ec2iface.EC2API type ServiceNameProvider interface { @@ -49,6 +45,10 @@ type eksInfo struct { } type ResourceStore struct { + logger *zap.Logger + config *Config + done chan struct{} + // mode should be EC2, ECS, EKS, and K8S mode string @@ -72,46 +72,45 @@ type ResourceStore struct { stsClient stsiface.STSAPI } -func GetResourceStore() *ResourceStore { - once.Do(func() { - resourceStore = initResourceStore() - }) - return resourceStore -} +var _ extension.Extension = (*ResourceStore)(nil) -func initResourceStore() *ResourceStore { +func (r *ResourceStore) Start(ctx context.Context, host component.Host) error { // Get IMDS client and EC2 API client which requires region for authentication // These will be passed down to any object that requires access to IMDS or EC2 // API client so we have single source of truth for credential - rs := &ResourceStore{ - metadataprovider: getMetaDataProvider(), - } + r.done = make(chan struct{}) + r.metadataprovider = getMetaDataProvider() if translatorCtx.CurrentContext().Mode() != "" { - rs.mode = translatorCtx.CurrentContext().Mode() - log.Printf("I! 
resourcestore: ResourceStore mode is %s ", rs.mode) + r.mode = translatorCtx.CurrentContext().Mode() + r.logger.Debug("ResourceStore mode is " + r.mode) } - switch rs.mode { + switch r.mode { case config.ModeEC2: - rs.ec2Info = *newEC2Info(rs.metadataprovider, getEC2Provider) - go rs.ec2Info.initEc2Info() + r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, r.done) + go r.ec2Info.initEc2Info() } - rs.serviceprovider = *newServiceProvider(rs.metadataprovider, getEC2Provider) - go rs.serviceprovider.startServiceProvider() - return rs + r.serviceprovider = *newServiceProvider(r.metadataprovider, getEC2Provider, r.done) + go r.serviceprovider.startServiceProvider() + return nil } -func (r *ResourceStore) Mode() string { - return r.mode +func (r *ResourceStore) Shutdown(_ context.Context) error { + close(r.done) + return nil } -func (r *ResourceStore) EC2Info() ec2Info { - return r.ec2Info +func (r *ResourceStore) Mode() string { + return r.mode } func (r *ResourceStore) EKSInfo() eksInfo { return r.eksInfo } +func (r *ResourceStore) EC2Info() ec2Info { + return r.ec2Info +} + func (r *ResourceStore) SetNativeCredential(client client.ConfigProvider) { r.nativeCredential = client } @@ -134,7 +133,9 @@ func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) * // AddServiceAttrEntryToResourceStore adds an entry to the resource store for the provided file -> serviceName, environmentName key-value pair func (r *ResourceStore) AddServiceAttrEntryToResourceStore(fileGlob string, serviceName string, environmentName string) { - r.serviceprovider.logFiles[fileGlob] = ServiceAttribute{ServiceName: serviceName, ServiceNameSource: AgentConfig, Environment: environmentName} + if r.serviceprovider.logFiles != nil { + r.serviceprovider.logFiles[fileGlob] = ServiceAttribute{ServiceName: serviceName, ServiceNameSource: AgentConfig, Environment: environmentName} + } } func (r *ResourceStore) createAttributeMaps() map[string]*string { @@ -164,13 +165,13 @@ 
func (r *ResourceStore) createServiceKeyAttributes(fileGlob string) *cloudwatchl // shouldReturnRID checks if the account ID for the instance is // matching the account ID when assuming role for the current credential. func (r *ResourceStore) shouldReturnRID() bool { - if r.nativeCredential == nil { - log.Printf("D! resourceStore: there is no credential stored for cross-account checks\n") + if r.nativeCredential == nil || r.metadataprovider == nil { + r.logger.Debug("there is no credential stored for cross-account checks") return false } doc, err := r.metadataprovider.Get(context.Background()) if err != nil { - log.Printf("D! resourceStore: an error occurred when getting instance document for cross-account checks. Reason: %v\n", err) + r.logger.Debug("an error occurred when getting instance document for cross-account checks. Reason: %v\n", zap.Error(err)) return false } instanceAccountID := doc.AccountID @@ -184,7 +185,7 @@ func (r *ResourceStore) shouldReturnRID() bool { } assumedRoleIdentity, err := r.stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) if err != nil { - log.Printf("D! resourceStore: an error occurred when calling STS GetCallerIdentity for cross-account checks. Reason: %v\n", err) + r.logger.Debug("an error occurred when calling STS GetCallerIdentity for cross-account checks. 
Reason: ", zap.Error(err)) return false } return instanceAccountID == *assumedRoleIdentity.Account diff --git a/internal/resourcestore/resourcestore_test.go b/extension/resourcestore/extension_test.go similarity index 91% rename from internal/resourcestore/resourcestore_test.go rename to extension/resourcestore/extension_test.go index 1d64ab7a0d..7a3445c1c2 100644 --- a/internal/resourcestore/resourcestore_test.go +++ b/extension/resourcestore/extension_test.go @@ -62,17 +62,6 @@ func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey stri return m.TagValue, nil } -func TestInitResourceStore(t *testing.T) { - tests := []struct { - name string - }{} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - initResourceStore() - }) - } -} - func TestResourceStore_EC2Info(t *testing.T) { tests := []struct { name string @@ -103,30 +92,6 @@ func TestResourceStore_EC2Info(t *testing.T) { } } -func TestResourceStore_EKSInfo(t *testing.T) { - tests := []struct { - name string - eksInfoInput eksInfo - want eksInfo - }{ - { - name: "happypath", - eksInfoInput: eksInfo{ClusterName: "test-cluster"}, - want: eksInfo{ClusterName: "test-cluster"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ - eksInfo: tt.eksInfoInput, - } - if got := r.EKSInfo(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("EKSInfo() = %v, want %v", got, tt.want) - } - }) - } -} - func TestResourceStore_LogFiles(t *testing.T) { tests := []struct { name string @@ -346,7 +311,13 @@ func dereferenceMap(input map[string]*string) map[string]string { } func TestAddServiceKeyAttributeToLogFilesMap(t *testing.T) { - rs := initResourceStore() + rs := &ResourceStore{ + metadataprovider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + AccountID: "987654321"}, + }, + serviceprovider: serviceprovider{logFiles: map[string]ServiceAttribute{}}, + } key := 
"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" rs.AddServiceAttrEntryToResourceStore(key, "test", "ec2:test") diff --git a/extension/resourcestore/factory.go b/extension/resourcestore/factory.go new file mode 100644 index 0000000000..98604cdb1e --- /dev/null +++ b/extension/resourcestore/factory.go @@ -0,0 +1,41 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" +) + +var ( + TypeStr, _ = component.NewType("resourcestore") + resourceStore *ResourceStore +) + +func GetResourceStore() *ResourceStore { + return resourceStore +} + +func NewFactory() extension.Factory { + return extension.NewFactory( + TypeStr, + createDefaultConfig, + createExtension, + component.StabilityLevelAlpha, + ) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { + resourceStore = &ResourceStore{ + logger: settings.Logger, + config: cfg.(*Config), + } + return resourceStore, nil +} diff --git a/extension/resourcestore/factory_test.go b/extension/resourcestore/factory_test.go new file mode 100644 index 0000000000..160dc7fb54 --- /dev/null +++ b/extension/resourcestore/factory_test.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension/extensiontest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := NewFactory().CreateDefaultConfig() + assert.Equal(t, &Config{}, cfg) + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateExtension(t *testing.T) { + cfg := &Config{} + got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, got) +} diff --git a/internal/resourcestore/serviceprovider.go b/extension/resourcestore/serviceprovider.go similarity index 93% rename from internal/resourcestore/serviceprovider.go rename to extension/resourcestore/serviceprovider.go index 4563c4bc53..2e5b125928 100644 --- a/internal/resourcestore/serviceprovider.go +++ b/extension/resourcestore/serviceprovider.go @@ -53,7 +53,7 @@ type serviceprovider struct { ec2Provider ec2ProviderType iamRole string ec2TagServiceName string - ctx context.Context + done chan struct{} // logFiles is a variable reserved for communication between OTEL components and LogAgent // in order to achieve process correlations where the key is the log file path and the value @@ -66,10 +66,10 @@ type serviceprovider struct { func (s *serviceprovider) startServiceProvider() { err := s.getEC2Client() if err != nil { - go refreshLoop(s.ctx, s.getEC2Client, true) + go refreshLoop(s.done, s.getEC2Client, true) } - go refreshLoop(s.ctx, s.getIAMRole, false) - go refreshLoop(s.ctx, s.getEC2TagServiceName, false) + go refreshLoop(s.done, s.getIAMRole, false) + go refreshLoop(s.done, s.getEC2TagServiceName, false) } // ServiceAttribute function gets the relevant service attributes @@ -192,16 +192,16 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func 
newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType) *serviceprovider { +func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, done chan struct{}) *serviceprovider { return &serviceprovider{ metadataProvider: metadataProvider, ec2Provider: providerType, - ctx: context.Background(), + done: done, logFiles: map[string]ServiceAttribute{}, } } -func refreshLoop(ctx context.Context, updateFunc func() error, oneTime bool) { +func refreshLoop(done chan struct{}, updateFunc func() error, oneTime bool) { // Offset retry by 1 so we can start with 1 minute wait time // instead of immediately retrying retry := 1 @@ -214,7 +214,8 @@ func refreshLoop(ctx context.Context, updateFunc func() error, oneTime bool) { waitDuration := calculateWaitTime(retry, err) wait := time.NewTimer(waitDuration) select { - case <-ctx.Done(): + case <-done: + log.Printf("D! serviceprovider: Shutting down now") wait.Stop() return case <-wait.C: diff --git a/internal/resourcestore/serviceprovider_test.go b/extension/resourcestore/serviceprovider_test.go similarity index 95% rename from internal/resourcestore/serviceprovider_test.go rename to extension/resourcestore/serviceprovider_test.go index dd5a00c71e..eb8e279ba5 100644 --- a/internal/resourcestore/serviceprovider_test.go +++ b/extension/resourcestore/serviceprovider_test.go @@ -4,7 +4,6 @@ package resourcestore import ( - "context" "testing" "time" @@ -62,18 +61,18 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - testCtx, cancel := context.WithCancel(context.TODO()) + done := make(chan struct{}) s := serviceprovider{ metadataProvider: tt.args.metadataProvider, ec2Provider: func(s string) ec2iface.EC2API { return tt.args.ec2Client }, ec2API: tt.args.ec2Client, - ctx: testCtx, + done: done, } go s.startServiceProvider() time.Sleep(3 * time.Second) - cancel() + 
close(done) assert.Equal(t, tt.wantIAM, s.iamRole) assert.Equal(t, tt.wantTag, s.ec2TagServiceName) @@ -137,7 +136,6 @@ func Test_serviceprovider_ServiceAttribute(t *testing.T) { iamRole: tt.fields.iamRole, ec2TagServiceName: tt.fields.ec2TagServiceName, logFiles: tt.fields.logFiles, - ctx: context.Background(), } assert.Equalf(t, tt.want, s.ServiceAttribute("test-file"), "ServiceAttribute()") }) @@ -295,7 +293,7 @@ func Test_refreshLoop(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - testCtx, cancel := context.WithCancel(context.TODO()) + done := make(chan struct{}) s := &serviceprovider{ metadataProvider: tt.fields.metadataProvider, ec2API: tt.fields.ec2API, @@ -304,12 +302,12 @@ func Test_refreshLoop(t *testing.T) { }, iamRole: tt.fields.iamRole, ec2TagServiceName: tt.fields.ec2TagServiceName, - ctx: testCtx, + done: done, } - go refreshLoop(testCtx, s.getEC2TagServiceName, tt.fields.oneTime) - go refreshLoop(testCtx, s.getIAMRole, tt.fields.oneTime) + go refreshLoop(done, s.getEC2TagServiceName, tt.fields.oneTime) + go refreshLoop(done, s.getIAMRole, tt.fields.oneTime) time.Sleep(time.Second) - cancel() + close(done) assert.Equal(t, tt.expectedInfo.iamRole, s.iamRole) assert.Equal(t, tt.expectedInfo.ec2TagServiceName, s.ec2TagServiceName) }) diff --git a/go.mod b/go.mod index 1c6c1af36d..e1006599d9 100644 --- a/go.mod +++ b/go.mod @@ -395,8 +395,10 @@ require ( go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 // indirect go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 // indirect go.opentelemetry.io/collector/connector v0.98.0 // indirect + go.opentelemetry.io/collector/exporter/nopexporter v0.98.0 // indirect go.opentelemetry.io/collector/extension/auth v0.98.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.98.0 // indirect + go.opentelemetry.io/collector/receiver/nopreceiver v0.98.0 // indirect go.opentelemetry.io/contrib/config v0.4.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect diff --git a/go.sum b/go.sum index 0e5f34e122..bc7a1b3988 100644 --- a/go.sum +++ b/go.sum @@ -315,6 +315,7 @@ github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/aws/telegraf v0.10.2-0.20240423220441-63baeaedb379 h1:EaMA5kc5yQzobctnBE8MYD9h4HPQ/YtCg4u0mFKXAj8= github.com/aws/telegraf v0.10.2-0.20240423220441-63baeaedb379/go.mod h1:tSaq8qDvwntXHIWy6YTHPoWttYsOnF7Hm3mpZfHkIrA= +github.com/aws/telegraf v0.10.2-0.20240624211148-226509a159c1/go.mod h1:5LhWLYfsZ7isLfw+TJUxPdTuzYuP8qiMiXz/DvqovRY= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20231109213610-a8c21c54a2be h1:sF6OUdk1hpuX7lf74vn+zBUFtQRe+hky0jmMYyFp5Kk= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20231109213610-a8c21c54a2be/go.mod h1:1W1wnODUDv+FBSAtAa878Kxto5kj8eV+kI0AF4LIjq4= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= @@ -1454,6 +1455,8 @@ go.opentelemetry.io/collector/exporter v0.98.0 h1:eN2qtkiwpeX9gBu9JZw1k/CZ3N9wZE go.opentelemetry.io/collector/exporter v0.98.0/go.mod h1:GCW46a0VAuW7nljlW//GgFXI+8mSrJjrdEKVO9icExE= go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0 h1:2DNfziYl0w8Sq9bPdYlPpn5MLLQGB73LB7O1BIYQxA4= go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0/go.mod h1:SBuTQ0sA3fEd/jAJFAxjTX8Ndwkc4Mtkc6gsz115S+8= +go.opentelemetry.io/collector/exporter/nopexporter v0.98.0 h1:Fc+TnZeSbMTDrk0r9kRMQK9J1IvjR9RXjIxDIn0NZY4= +go.opentelemetry.io/collector/exporter/nopexporter v0.98.0/go.mod h1:K4lPZjTM3hlacY+tqN2b0lALLb13K9gu5C57hox1V6g= go.opentelemetry.io/collector/extension v0.98.0 h1:08B5ipEsoNmPHY96j5EUsUrFre01GOZ4zgttUDtPUkY= go.opentelemetry.io/collector/extension v0.98.0/go.mod 
h1:fZ1Hnnahszl5j3xcW2sMRJ0FLWDOFkFMQeVDP0Se7i8= go.opentelemetry.io/collector/extension/auth v0.98.0 h1:7b1jioijJbTMqaOCrz5Hoqf+zJn2iPlGmtN7pXLNWbA= @@ -1476,6 +1479,8 @@ go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 h1:iM4fMLGig3GKmz go.opentelemetry.io/collector/processor/batchprocessor v0.98.0/go.mod h1:ROnuUkZJgpKEIDf3AIVjgRGNI7KPqCKPXsw8whL6Hzs= go.opentelemetry.io/collector/receiver v0.98.0 h1:qw6JYwm+sHcZvM1DByo3QlGe6yGHuwd0yW4hEPVqYKU= go.opentelemetry.io/collector/receiver v0.98.0/go.mod h1:AwIWn+KnquTR+kbhXQrMH+i2PvTCFldSIJznBWFYs0s= +go.opentelemetry.io/collector/receiver/nopreceiver v0.98.0 h1:PQQwtE2+qe3U6I0cfMz+L79VS41oRMM2nY8S/2mJ95E= +go.opentelemetry.io/collector/receiver/nopreceiver v0.98.0/go.mod h1:ph9KWemo5QhxII+VEES9buzGMP6hSh/ZBLmS9oK+lJ0= go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 h1:j7lfLwc5o1dtXIPXU8LjmxadejmJVRHN57ZYGH33Wq4= go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0/go.mod h1:uWDBDxaWuzF1U5S2UIhstO0+Q8aUiwiUu8uO1IYN2XQ= go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index de8fc612fe..531fd57c36 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -16,8 +16,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" "github.com/aws/amazon-cloudwatch-agent/internal/logscommon" - "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/globpath" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" @@ -160,8 +160,9 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { fileconfig := &t.FileConfig[i] //Add file -> {serviceName, deploymentEnvironment} mapping to resource store - 
rs.AddServiceAttrEntryToResourceStore(fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) - + if rs != nil { + rs.AddServiceAttrEntryToResourceStore(fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) + } targetFiles, err := t.getTargetFiles(fileconfig) if err != nil { t.Log.Errorf("Failed to find target files for file config %v, with error: %v", fileconfig.FilePath, err) diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index b6e1126737..2f4ea4c168 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/text/encoding" - "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" ) @@ -171,7 +171,11 @@ func (ts *tailerSrc) AddCleanUpFn(f func()) { } func (ts *tailerSrc) ResourceID() *cloudwatchlogs.Resource { - return resourcestore.GetResourceStore().CreateLogFileRID(ts.fileGlobPath, ts.tailer.Filename) + rs := resourcestore.GetResourceStore() + if rs != nil { + return resourcestore.GetResourceStore().CreateLogFileRID(ts.fileGlobPath, ts.tailer.Filename) + } + return nil } func (ts *tailerSrc) runTail() { diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index c8da16084a..222acb7d3f 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -24,9 +24,9 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" 
"github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal" - "github.com/aws/amazon-cloudwatch-agent/internal/resourcestore" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/tool/util" @@ -126,10 +126,6 @@ func (c *CloudWatchLogs) CreateDest(group, stream string, retention int, logGrou } func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { - if cwd, ok := c.cwDests[t]; ok { - return cwd - } - credentialConfig := &configaws.CredentialConfig{ Region: c.Region, AccessKey: c.AccessKey, @@ -139,12 +135,16 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { Filename: c.Filename, Token: c.Token, } - - logThrottleRetryer := retryer.NewLogThrottleRetryer(c.Log) resourcestore := resourcestore.GetResourceStore() - if !resourcestore.NativeCredentialExists() { + if resourcestore != nil && !resourcestore.NativeCredentialExists() { resourcestore.SetNativeCredential(credentialConfig.Credentials()) } + if cwd, ok := c.cwDests[t]; ok { + return cwd + } + + logThrottleRetryer := retryer.NewLogThrottleRetryer(c.Log) + client := cloudwatchlogs.New( credentialConfig.Credentials(), &aws.Config{ diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index d2c314d5a6..27b3a1dac7 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -18,14 +18,17 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/nopexporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/batchprocessor" "go.opentelemetry.io/collector/receiver" + 
"go.opentelemetry.io/collector/receiver/nopreceiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" @@ -42,6 +45,7 @@ func Factories() (otelcol.Factories, error) { otlpreceiver.NewFactory(), tcplogreceiver.NewFactory(), udplogreceiver.NewFactory(), + nopreceiver.NewFactory(), ); err != nil { return otelcol.Factories{}, err } @@ -65,6 +69,7 @@ func Factories() (otelcol.Factories, error) { awsxrayexporter.NewFactory(), cloudwatch.NewFactory(), loggingexporter.NewFactory(), + nopexporter.NewFactory(), ); err != nil { return otelcol.Factories{}, err } @@ -72,6 +77,7 @@ func Factories() (otelcol.Factories, error) { if factories.Extensions, err = extension.MakeFactoryMap( agenthealth.NewFactory(), awsproxy.NewFactory(), + resourcestore.NewFactory(), ); err != nil { return otelcol.Factories{}, err } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 6c5f3b04c7..84e30bbdb0 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -11,10 +11,10 @@ import ( ) const ( - receiversCount = 5 + receiversCount = 6 processorCount = 8 - exportersCount = 5 - extensionsCount = 2 + exportersCount = 6 + extensionsCount = 3 ) func TestComponents(t *testing.T) { @@ -22,6 +22,9 @@ func TestComponents(t *testing.T) { assert.NoError(t, err) receivers := factories.Receivers assert.Len(t, receivers, receiversCount) + + nopType, _ := component.NewType("nop") + awscontainerinsightreceiverType, _ := component.NewType("awscontainerinsightreceiver") awsxrayType, _ := component.NewType("awsxray") otlpType, _ := component.NewType("otlp") 
@@ -32,6 +35,7 @@ func TestComponents(t *testing.T) { assert.NotNil(t, receivers[otlpType]) assert.NotNil(t, receivers[tcplogType]) assert.NotNil(t, receivers[udplogType]) + assert.NotNil(t, receivers[nopType]) processors := factories.Processors assert.Len(t, processors, processorCount) @@ -61,6 +65,7 @@ func TestComponents(t *testing.T) { assert.NotNil(t, exporters[awsemfType]) assert.NotNil(t, exporters[awscloudwatchType]) assert.NotNil(t, exporters[loggingType]) + assert.NotNil(t, exporters[nopType]) extensions := factories.Extensions assert.Len(t, extensions, extensionsCount) From b08f6c1d7ca52498a85f95310eb52c55c1a40e9f Mon Sep 17 00:00:00 2001 From: Bryce Carey Date: Fri, 28 Jun 2024 15:35:53 -0400 Subject: [PATCH 32/55] Adding character limits to entity fields and attributes based on AWS quotas (#733) --- extension/resourcestore/ec2Info.go | 33 +++++++++++--- extension/resourcestore/ec2Info_test.go | 54 +++++++++++++++++++++++ extension/resourcestore/extension.go | 10 ++--- extension/resourcestore/extension_test.go | 12 ++--- translator/config/schema.json | 18 +++++--- 5 files changed, 105 insertions(+), 22 deletions(-) diff --git a/extension/resourcestore/ec2Info.go b/extension/resourcestore/ec2Info.go index f48094e1f1..7f0f30d209 100644 --- a/extension/resourcestore/ec2Info.go +++ b/extension/resourcestore/ec2Info.go @@ -18,6 +18,16 @@ import ( "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) +const ( + // InstanceId character maximum length is 19. + // See https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_Instance.html. + instanceIdSizeMax = 19 + + // AutoScalingGroup character maximum length is 255. + // See https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AutoScalingGroup.html. + autoScalingGroupSizeMax = 255 +) + type ec2Info struct { InstanceID string AutoScalingGroup string @@ -41,6 +51,7 @@ func (ei *ec2Info) initEc2Info() { return } log.Printf("D! 
ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) + ei.ignoreInvalidFields() } func (ei *ec2Info) setInstanceIdAndRegion() error { @@ -54,13 +65,13 @@ func (ei *ec2Info) setInstanceIdAndRegion() error { wait.Stop() return errors.New("ec2Info: shutdownC received") case <-wait.C: + continue } - } else { - ei.InstanceID = metadataDoc.InstanceID - ei.Region = metadataDoc.Region - log.Printf("D! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) - return nil } + log.Printf("D! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) + ei.InstanceID = metadataDoc.InstanceID + ei.Region = metadataDoc.Region + return nil } } @@ -165,3 +176,15 @@ func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerT done: done, } } + +func (ei *ec2Info) ignoreInvalidFields() { + if idLength := len(ei.InstanceID); idLength > instanceIdSizeMax { + log.Printf("W! ec2Info: InstanceId length of %d exceeds %d characters and will be ignored", idLength, instanceIdSizeMax) + ei.InstanceID = "" + } + + if asgLength := len(ei.AutoScalingGroup); asgLength > autoScalingGroupSizeMax { + log.Printf("W! 
ec2Info: AutoScalingGroup length of %d exceeds %d characters and will be ignored", asgLength, autoScalingGroupSizeMax) + ei.AutoScalingGroup = "" + } +} diff --git a/extension/resourcestore/ec2Info_test.go b/extension/resourcestore/ec2Info_test.go index d58a1d98e0..5e1b694808 100644 --- a/extension/resourcestore/ec2Info_test.go +++ b/extension/resourcestore/ec2Info_test.go @@ -4,6 +4,7 @@ package resourcestore import ( + "strings" "testing" "github.com/aws/aws-sdk-go/aws/ec2metadata" @@ -196,3 +197,56 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { }) } } + +func TestIgnoreInvalidFields(t *testing.T) { + type want struct { + instanceId string + autoScalingGroup string + } + tests := []struct { + name string + args *ec2Info + want want + }{ + { + name: "Happy path", + args: &ec2Info{ + InstanceID: "i-01d2417c27a396e44", + AutoScalingGroup: "asg", + }, + want: want{ + instanceId: "i-01d2417c27a396e44", + autoScalingGroup: "asg", + }, + }, + { + name: "InstanceId too large", + args: &ec2Info{ + InstanceID: strings.Repeat("a", 20), + AutoScalingGroup: "asg", + }, + want: want{ + instanceId: "", + autoScalingGroup: "asg", + }, + }, + { + name: "AutoScalingGroup too large", + args: &ec2Info{ + InstanceID: "i-01d2417c27a396e44", + AutoScalingGroup: strings.Repeat("a", 256), + }, + want: want{ + instanceId: "i-01d2417c27a396e44", + autoScalingGroup: "", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.args.ignoreInvalidFields() + assert.Equal(t, tt.want.instanceId, tt.args.InstanceID) + assert.Equal(t, tt.want.autoScalingGroup, tt.args.AutoScalingGroup) + }) + } +} diff --git a/extension/resourcestore/extension.go b/extension/resourcestore/extension.go index 0799d1748a..0e1d878ba2 100644 --- a/extension/resourcestore/extension.go +++ b/extension/resourcestore/extension.go @@ -25,10 +25,10 @@ import ( ) const ( - Service = "Service" - InstanceIDKey = "EC2.InstanceId" - ASGKey = "EC2.AutoScalingGroup" - ServieNameSourceKey = 
"AWS.Internal.ServiceNameSource" + Service = "Service" + InstanceIDKey = "EC2.InstanceId" + ASGKey = "EC2.AutoScalingGroup" + ServiceNameSourceKey = "AWS.Internal.ServiceNameSource" ) type ec2ProviderType func(string) ec2iface.EC2API @@ -144,7 +144,7 @@ func (r *ResourceStore) createAttributeMaps() map[string]*string { addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) - addNonEmptyToMap(attributeMap, ServieNameSourceKey, serviceAttr.ServiceNameSource) + addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) return attributeMap } diff --git a/extension/resourcestore/extension_test.go b/extension/resourcestore/extension_test.go index 7a3445c1c2..2b51eccdfa 100644 --- a/extension/resourcestore/extension_test.go +++ b/extension/resourcestore/extension_test.go @@ -184,9 +184,9 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { }, }, want: map[string]*string{ - ServieNameSourceKey: aws.String(ClientIamRole), - ASGKey: aws.String("test-asg"), - InstanceIDKey: aws.String("i-123456789"), + ServiceNameSourceKey: aws.String(ClientIamRole), + ASGKey: aws.String("test-asg"), + InstanceIDKey: aws.String("i-123456789"), }, }, { @@ -201,9 +201,9 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { }, }, want: map[string]*string{ - ServieNameSourceKey: aws.String(ResourceTags), - ASGKey: aws.String("test-asg"), - InstanceIDKey: aws.String("i-123456789"), + ServiceNameSourceKey: aws.String(ResourceTags), + ASGKey: aws.String("test-asg"), + InstanceIDKey: aws.String("i-123456789"), }, }, } diff --git a/translator/config/schema.json b/translator/config/schema.json index 1d46f82232..fa1589eab8 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -54,14 +54,16 @@ "type": "boolean" }, "service.name": { + "description": "The name of the service to associate with the telemetry produced by the agent.", "type": "string", 
"minLength": 1, - "maxLength": 512 + "maxLength": 255 }, "deployment.environment": { + "description": "The name of the environment to associate with the telemetry produced by the agent.", "type": "string", "minLength": 1, - "maxLength": 512 + "maxLength": 259 } }, "additionalProperties": true @@ -787,14 +789,16 @@ "$ref": "#/definitions/endpointOverrideDefinition" }, "service.name": { + "description": "The name of the service to associate with the telemetry produced by the agent.", "type": "string", "minLength": 1, - "maxLength": 512 + "maxLength": 255 }, "deployment.environment": { + "description": "The name of the environment to associate with the telemetry produced by the agent.", "type": "string", "minLength": 1, - "maxLength": 512 + "maxLength": 259 } }, "additionalProperties": false, @@ -877,14 +881,16 @@ } }, "service.name": { + "description": "The name of the service to associate with the telemetry produced by the agent.", "type": "string", "minLength": 1, - "maxLength": 512 + "maxLength": 255 }, "deployment.environment": { + "description": "The name of the environment to associate with the telemetry produced by the agent.", "type": "string", "minLength": 1, - "maxLength": 512 + "maxLength": 259 } }, "required": [ From 37836ceffd79d363287003db1bf46d8a159e1a9c Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:09:10 -0400 Subject: [PATCH 33/55] Add platform type to RID attribute map (#737) --- extension/resourcestore/extension.go | 12 +++++++----- extension/resourcestore/extension_test.go | 12 ++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/extension/resourcestore/extension.go b/extension/resourcestore/extension.go index 0e1d878ba2..e04cc907f1 100644 --- a/extension/resourcestore/extension.go +++ b/extension/resourcestore/extension.go @@ -21,7 +21,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" 
"github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/translator/config" - translatorCtx "github.com/aws/amazon-cloudwatch-agent/translator/context" ) const ( @@ -29,6 +28,8 @@ const ( InstanceIDKey = "EC2.InstanceId" ASGKey = "EC2.AutoScalingGroup" ServiceNameSourceKey = "AWS.Internal.ServiceNameSource" + PlatformType = "PlatformType" + EC2PlatForm = "AWS::EC2" ) type ec2ProviderType func(string) ec2iface.EC2API @@ -80,10 +81,7 @@ func (r *ResourceStore) Start(ctx context.Context, host component.Host) error { // API client so we have single source of truth for credential r.done = make(chan struct{}) r.metadataprovider = getMetaDataProvider() - if translatorCtx.CurrentContext().Mode() != "" { - r.mode = translatorCtx.CurrentContext().Mode() - r.logger.Debug("ResourceStore mode is " + r.mode) - } + r.mode = r.config.Mode switch r.mode { case config.ModeEC2: r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, r.done) @@ -145,6 +143,10 @@ func (r *ResourceStore) createAttributeMaps() map[string]*string { addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) + switch r.mode { + case config.ModeEC2: + attributeMap[PlatformType] = aws.String(EC2PlatForm) + } return attributeMap } diff --git a/extension/resourcestore/extension_test.go b/extension/resourcestore/extension_test.go index 2b51eccdfa..a1df099b2e 100644 --- a/extension/resourcestore/extension_test.go +++ b/extension/resourcestore/extension_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) type mockMetadataProvider struct { @@ -166,6 +167,7 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { type fields struct { ec2Info ec2Info 
serviceprovider serviceprovider + mode string } tests := []struct { name string @@ -206,12 +208,22 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { InstanceIDKey: aws.String("i-123456789"), }, }, + { + name: "HappyPath_TagServiceName", + fields: fields{ + mode: config.ModeEC2, + }, + want: map[string]*string{ + PlatformType: aws.String(EC2PlatForm), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &ResourceStore{ ec2Info: tt.fields.ec2Info, serviceprovider: tt.fields.serviceprovider, + mode: tt.fields.mode, } assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createAttributeMaps()), "createAttributeMaps()") }) From 6e09a621d49281185105a472de1774994f13ec5d Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 2 Jul 2024 09:19:07 -0400 Subject: [PATCH 34/55] Add translator for resourcestore and change credential for EC2 calls (#736) --- extension/resourcestore/ec2Info.go | 7 +- extension/resourcestore/extension.go | 13 +-- extension/resourcestore/serviceprovider.go | 12 +-- .../resourcestore/serviceprovider_test.go | 5 +- .../sampleConfig/advanced_config_darwin.yaml | 3 + .../sampleConfig/advanced_config_linux.yaml | 3 + .../sampleConfig/advanced_config_windows.yaml | 3 + .../appsignals_and_eks_config.yaml | 3 + .../appsignals_and_k8s_config.yaml | 3 + .../appsignals_fallback_and_eks_config.yaml | 3 + .../appsignals_over_fallback_config.yaml | 3 + .../sampleConfig/base_appsignals_config.yaml | 5 ++ .../base_appsignals_fallback_config.yaml | 5 ++ .../base_container_insights_config.yaml | 3 + .../sampleConfig/basic_config_linux.yaml | 3 + .../sampleConfig/basic_config_windows.yaml | 3 + .../sampleConfig/collectd_config_linux.yaml | 3 + .../sampleConfig/compass_linux_config.yaml | 35 ++++++++ .../sampleConfig/complete_darwin_config.yaml | 3 + .../sampleConfig/complete_linux_config.yaml | 3 + .../sampleConfig/complete_windows_config.yaml | 3 + .../sampleConfig/config_with_env.yaml 
| 3 + .../sampleConfig/delta_config_linux.yaml | 3 + .../sampleConfig/delta_net_config_linux.yaml | 3 + .../sampleConfig/drop_origin_linux.yaml | 3 + .../emf_and_kubernetes_config.yaml | 5 ++ .../emf_and_kubernetes_with_gpu_config.yaml | 5 ++ .../ignore_append_dimensions.yaml | 3 + .../sampleConfig/invalid_input_linux.yaml | 3 + .../kubernetes_on_prem_config.yaml | 5 ++ .../sampleConfig/log_ecs_metric_only.yaml | 3 + .../tocwconfig/sampleConfig/log_filter.yaml | 35 ++++++++ .../sampleConfig/log_only_config_windows.yaml | 35 ++++++++ .../logs_and_kubernetes_config.yaml | 4 + .../sampleConfig/no_skip_log_timestamp.yaml | 35 ++++++++ .../no_skip_log_timestamp_windows.yaml | 35 ++++++++ .../sampleConfig/prometheus_config_linux.yaml | 3 + .../prometheus_config_windows.yaml | 3 + .../sampleConfig/skip_log_timestamp.yaml | 35 ++++++++ .../skip_log_timestamp_default.yaml | 35 ++++++++ .../skip_log_timestamp_default_windows.yaml | 35 ++++++++ .../skip_log_timestamp_windows.yaml | 35 ++++++++ .../sampleConfig/standard_config_linux.yaml | 3 + ...ndard_config_linux_with_common_config.yaml | 5 ++ .../sampleConfig/standard_config_windows.yaml | 3 + ...ard_config_windows_with_common_config.yaml | 5 ++ .../sampleConfig/statsd_config_linux.yaml | 3 + .../sampleConfig/statsd_config_windows.yaml | 3 + .../sampleConfig/trace_config_linux.yaml | 5 ++ .../sampleConfig/trace_config_windows.yaml | 5 ++ .../windows_eventlog_only_config.yaml | 35 ++++++++ .../translate/otel/exporter/translator.go | 33 +++++++ .../otel/exporter/translator_test.go | 20 +++++ .../extension/resourcestore/translator.go | 42 +++++++++ .../resourcestore/translator_test.go | 62 +++++++++++++ .../translate/otel/pipeline/nop/translator.go | 55 ++++++++++++ .../otel/pipeline/nop/translator_test.go | 89 +++++++++++++++++++ .../translate/otel/receiver/translator.go | 33 +++++++ .../otel/receiver/translator_test.go | 20 +++++ translator/translate/otel/translate_otel.go | 11 ++- 60 files changed, 866 insertions(+), 16 
deletions(-) create mode 100644 translator/tocwconfig/sampleConfig/compass_linux_config.yaml create mode 100644 translator/tocwconfig/sampleConfig/log_filter.yaml create mode 100644 translator/tocwconfig/sampleConfig/log_only_config_windows.yaml create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml create mode 100644 translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml create mode 100644 translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml create mode 100644 translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml create mode 100644 translator/translate/otel/exporter/translator.go create mode 100644 translator/translate/otel/exporter/translator_test.go create mode 100644 translator/translate/otel/extension/resourcestore/translator.go create mode 100644 translator/translate/otel/extension/resourcestore/translator_test.go create mode 100644 translator/translate/otel/pipeline/nop/translator.go create mode 100644 translator/translate/otel/pipeline/nop/translator_test.go create mode 100644 translator/translate/otel/receiver/translator.go create mode 100644 translator/translate/otel/receiver/translator_test.go diff --git a/extension/resourcestore/ec2Info.go b/extension/resourcestore/ec2Info.go index 7f0f30d209..cdf46c8e94 100644 --- a/extension/resourcestore/ec2Info.go +++ b/extension/resourcestore/ec2Info.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -38,6 +39,7 @@ type 
ec2Info struct { metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API ec2Provider ec2ProviderType + ec2Credential *configaws.CredentialConfig done chan struct{} } @@ -46,7 +48,7 @@ func (ei *ec2Info) initEc2Info() { if err := ei.setInstanceIdAndRegion(); err != nil { return } - ei.ec2API = ei.ec2Provider(ei.Region) + ei.ec2API = ei.ec2Provider(ei.Region, ei.ec2Credential) if err := ei.setAutoScalingGroup(); err != nil { return } @@ -169,10 +171,11 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error return nil } -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, done chan struct{}) *ec2Info { +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) *ec2Info { return &ec2Info{ metadataProvider: metadataProvider, ec2Provider: providerType, + ec2Credential: ec2Credential, done: done, } } diff --git a/extension/resourcestore/extension.go b/extension/resourcestore/extension.go index e04cc907f1..0992349afa 100644 --- a/extension/resourcestore/extension.go +++ b/extension/resourcestore/extension.go @@ -32,7 +32,7 @@ const ( EC2PlatForm = "AWS::EC2" ) -type ec2ProviderType func(string) ec2iface.EC2API +type ec2ProviderType func(string, *configaws.CredentialConfig) ec2iface.EC2API type ServiceNameProvider interface { ServiceName() @@ -82,12 +82,16 @@ func (r *ResourceStore) Start(ctx context.Context, host component.Host) error { r.done = make(chan struct{}) r.metadataprovider = getMetaDataProvider() r.mode = r.config.Mode + ec2CredentialConfig := &configaws.CredentialConfig{ + Profile: r.config.Profile, + Filename: r.config.Filename, + } switch r.mode { case config.ModeEC2: - r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, r.done) + r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) go r.ec2Info.initEc2Info() } - 
r.serviceprovider = *newServiceProvider(r.metadataprovider, getEC2Provider, r.done) + r.serviceprovider = *newServiceProvider(r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) go r.serviceprovider.startServiceProvider() return nil } @@ -198,8 +202,7 @@ func getMetaDataProvider() ec2metadataprovider.MetadataProvider { return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) } -func getEC2Provider(region string) ec2iface.EC2API { - ec2CredentialConfig := &configaws.CredentialConfig{} +func getEC2Provider(region string, ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { ec2CredentialConfig.Region = region return ec2.New( ec2CredentialConfig.Credentials(), diff --git a/extension/resourcestore/serviceprovider.go b/extension/resourcestore/serviceprovider.go index 2e5b125928..c576bcf4b4 100644 --- a/extension/resourcestore/serviceprovider.go +++ b/extension/resourcestore/serviceprovider.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -51,6 +52,7 @@ type serviceprovider struct { metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API ec2Provider ec2ProviderType + ec2Credential *configaws.CredentialConfig iamRole string ec2TagServiceName string done chan struct{} @@ -86,16 +88,15 @@ func (s *serviceprovider) ServiceAttribute(fileGlob string) ServiceAttribute { serviceAttr.ServiceName = val.ServiceName serviceAttr.ServiceNameSource = val.ServiceNameSource serviceAttr.Environment = val.Environment - return serviceAttr } // Instance Tags - if s.ec2TagServiceName != "" { + if s.ec2TagServiceName != "" && serviceAttr.ServiceName == "" { serviceAttr.ServiceName = s.ec2TagServiceName 
serviceAttr.ServiceNameSource = ResourceTags return serviceAttr } //IAM Role - if s.iamRole != "" { + if s.iamRole != "" && serviceAttr.ServiceName == "" { serviceAttr.ServiceName = s.iamRole serviceAttr.ServiceNameSource = ClientIamRole return serviceAttr @@ -165,7 +166,7 @@ func (s *serviceprovider) getEC2Client() error { if err != nil { return fmt.Errorf("failed to get EC2 client: %s", err) } - s.ec2API = s.ec2Provider(region) + s.ec2API = s.ec2Provider(region, s.ec2Credential) return nil } @@ -192,10 +193,11 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, done chan struct{}) *serviceprovider { +func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) *serviceprovider { return &serviceprovider{ metadataProvider: metadataProvider, ec2Provider: providerType, + ec2Credential: ec2Credential, done: done, logFiles: map[string]ServiceAttribute{}, } diff --git a/extension/resourcestore/serviceprovider_test.go b/extension/resourcestore/serviceprovider_test.go index eb8e279ba5..7b0cc98c2d 100644 --- a/extension/resourcestore/serviceprovider_test.go +++ b/extension/resourcestore/serviceprovider_test.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) @@ -64,7 +65,7 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { done := make(chan struct{}) s := serviceprovider{ metadataProvider: tt.args.metadataProvider, - ec2Provider: func(s string) ec2iface.EC2API { + ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { return tt.args.ec2Client }, ec2API: tt.args.ec2Client, @@ 
-297,7 +298,7 @@ func Test_refreshLoop(t *testing.T) { s := &serviceprovider{ metadataProvider: tt.fields.metadataProvider, ec2API: tt.fields.ec2API, - ec2Provider: func(s string) ec2iface.EC2API { + ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { return tt.fields.ec2API }, iamRole: tt.fields.iamRole, diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 3e4faa03ef..1f4fffc354 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -65,6 +67,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index 80e9460692..54f65b8822 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -73,6 +75,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 7222672004..4d89a5f22c 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -66,6 
+68,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index fb91c0a6cf..59f329e7b7 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -285,6 +285,8 @@ extensions: region: us-east-1 service_name: "" role_arn: "" + resourcestore: + mode: ec2 processors: awsapplicationsignals: limiter: @@ -656,6 +658,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index a49ebeeb37..443b95e046 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -285,6 +285,8 @@ extensions: region: us-east-1 service_name: "" role_arn: "" + resourcestore: + mode: ec2 processors: awsapplicationsignals: limiter: @@ -636,6 +638,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 6a4d070a80..ed8d715b7a 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -285,6 +285,8 @@ extensions: region: us-east-1 service_name: "" role_arn: "" + resourcestore: + mode: ec2 processors: awsapplicationsignals: limiter: @@ -656,6 +658,7 @@ service: - awsproxy/application_signals - 
agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index a8c3e656d9..9517705a33 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -285,6 +285,8 @@ extensions: region: us-east-1 service_name: "" role_arn: "" + resourcestore: + mode: ec2 processors: awsapplicationsignals: limiter: @@ -656,6 +658,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index dd5c0e8d4d..6f00e27b92 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -152,6 +152,10 @@ extensions: service_name: "" shared_credentials_file: - fake-path + resourcestore: + mode: onPremise + profile: AmazonCloudWatchAgent + shared_credential_file: fake-path processors: awsapplicationsignals: resolvers: @@ -467,6 +471,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 11c0eefb2b..17c1424f02 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -152,6 +152,10 @@ extensions: service_name: "" shared_credentials_file: - fake-path + resourcestore: + mode: onPremise + profile: 
AmazonCloudWatchAgent + shared_credential_file: fake-path processors: awsapplicationsignals: resolvers: @@ -467,6 +471,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs + - resourcestore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index 4a923363fe..bbf0a44a36 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -148,6 +148,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -214,6 +216,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index a3229236cb..9d368bc4e4 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -39,6 +41,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 2dd4256f61..105a798799 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -41,6 +43,7 @@ receivers: service: extensions: - 
agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 43ee813bf5..4a0453ee2b 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 receivers: telegraf_socket_listener: collection_interval: 1m0s @@ -25,6 +27,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml new file mode 100644 index 0000000000..3f88407d97 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: ec2 +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: error + output_paths: + - /tmp/fake/log/hotdog.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: {} diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 71398a6a6c..2dc0671c6b 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -91,6 +91,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -247,6 +249,7 @@ service: - agenthealth/metrics - 
agenthealth/logs - agenthealth/traces + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index db528eb469..0485c6e8e7 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -94,6 +94,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -250,6 +252,7 @@ service: - agenthealth/metrics - agenthealth/logs - agenthealth/traces + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index eb55ab5b2a..acdd5570c0 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -91,6 +91,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -234,6 +236,7 @@ service: - agenthealth/metrics - agenthealth/logs - agenthealth/traces + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index 6ce8ebb230..e24d38b086 100644 --- a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -39,6 +39,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 @@ -75,6 +77,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml 
b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 9cdb77edee..463908b790 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -58,6 +60,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/hostDeltaMetrics: exporters: diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index f06c994620..0a640aa79f 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -42,6 +44,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/hostDeltaMetrics: exporters: diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 286bdf2e93..8edf76f397 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -22,6 +22,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -58,6 +60,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index c72450e249..47da803a54 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ 
b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -392,6 +392,10 @@ extensions: usage_flags: mode: OP region_type: ACJ + resourcestore: + mode: onPremise + profile: default + shared_credential_file: /root/.aws/credentials processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -470,6 +474,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml index f8137e88c9..9e481623ff 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml @@ -657,6 +657,10 @@ extensions: usage_flags: mode: OP region_type: ACJ + resourcestore: + mode: onPremise + profile: default + shared_credential_file: /root/.aws/credentials processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -1149,6 +1153,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index e323834f4e..6f2a28fc3a 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: imds_retries: 1 @@ -33,6 +35,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index a3229236cb..9d368bc4e4 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml 
+++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -39,6 +41,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index e3531c6140..7a60beba8a 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -359,6 +359,10 @@ extensions: usage_flags: mode: OP region_type: ACJ + resourcestore: + mode: onPremise + profile: AmazonCloudWatchAgent + shared_credential_file: fake-path processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -406,6 +410,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: metrics/containerinsights: exporters: diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 824b9f27df..99542362b6 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -97,6 +97,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -163,6 +165,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/log_filter.yaml b/translator/tocwconfig/sampleConfig/log_filter.yaml new file mode 100644 index 0000000000..a07aa2b23a --- /dev/null +++ b/translator/tocwconfig/sampleConfig/log_filter.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: 
+ extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml new file mode 100644 index 0000000000..701f8b7752 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 05b02c06c9..fa7a4b110b 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -388,6 +388,9 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 + processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -464,6 +467,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: logs/emf_logs: 
exporters: diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml new file mode 100644 index 0000000000..a07aa2b23a --- /dev/null +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml new file mode 100644 index 0000000000..701f8b7752 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 83ca491154..3ec0d8cb2e 100644 --- 
a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -77,6 +77,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/prometheus: metadata_cardinality_limit: 1000 @@ -91,6 +93,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: metrics/prometheus: exporters: diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index bb0866fc55..b648080e28 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -59,6 +59,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: batch/prometheus: metadata_cardinality_limit: 1000 @@ -73,6 +75,7 @@ receivers: service: extensions: - agenthealth/logs + - resourcestore pipelines: metrics/prometheus: exporters: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml new file mode 100644 index 0000000000..44c0e9ac18 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - /opt/tmp/a.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml 
b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml new file mode 100644 index 0000000000..a07aa2b23a --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - /opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml new file mode 100644 index 0000000000..701f8b7752 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml new file mode 100644 index 0000000000..088a6f3e4e --- /dev/null +++ 
b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - c:\tmp\am.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index ac1bf370ba..9bdb6e11cc 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -60,6 +62,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 415942372b..4e35dbcc1b 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -19,6 +19,10 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 + profile: AmazonCloudWatchAgent + shared_credential_file: fake-path processors: cumulativetodelta/hostDeltaMetrics: exclude: @@ -65,6 +69,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git 
a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 54c873f82d..3077937363 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 processors: ec2tagger: ec2_instance_tag_keys: @@ -55,6 +57,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 830c6ad2ca..59806ca520 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -19,6 +19,10 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 + profile: AmazonCloudWatchAgent + shared_credential_file: fake-path processors: ec2tagger: ec2_instance_tag_keys: @@ -60,6 +64,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 6e818d628f..202006271f 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 receivers: telegraf_statsd: collection_interval: 10s @@ -25,6 +27,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml 
b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index b0c9488ad8..ca92d50f80 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -17,6 +17,8 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 receivers: telegraf_statsd: collection_interval: 10s @@ -25,6 +27,7 @@ receivers: service: extensions: - agenthealth/metrics + - resourcestore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index 79dccf9674..5d05eec794 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -29,6 +29,10 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 + profile: default + shared_credential_file: /root/.aws/credentials processors: batch/xray: metadata_cardinality_limit: 1000 @@ -78,6 +82,7 @@ receivers: service: extensions: - agenthealth/traces + - resourcestore pipelines: traces/xray: exporters: diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index f39e70b5e5..cc8ab5d475 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -29,6 +29,10 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + resourcestore: + mode: ec2 + profile: default + shared_credential_file: /root/.aws/credentials processors: batch/xray: metadata_cardinality_limit: 1000 @@ -78,6 +82,7 @@ receivers: service: extensions: - agenthealth/traces + - resourcestore pipelines: traces/xray: exporters: diff --git a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml new file mode 100644 index 
0000000000..0a5a41b9ec --- /dev/null +++ b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml @@ -0,0 +1,35 @@ +exporters: + nop: {} +extensions: + resourcestore: + mode: "ec2" +receivers: + nop: {} +service: + extensions: + - resourcestore + pipelines: + metrics/nop: + exporters: + - nop + processors: [] + receivers: + - nop + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + level: info + output_paths: + - c:\ProgramData\Amazon\AmazonCloudWatchAgent\Logs\amazon-cloudwatch-agent.log + sampling: + enabled: true + initial: 2 + thereafter: 500 + tick: 10s + metrics: + address: "" + level: None + traces: { } \ No newline at end of file diff --git a/translator/translate/otel/exporter/translator.go b/translator/translate/otel/exporter/translator.go new file mode 100644 index 0000000000..d8c2acbc37 --- /dev/null +++ b/translator/translate/otel/exporter/translator.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package exporter + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/exporter" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory exporter.Factory +} + +func NewDefaultTranslator(factory exporter.Factory) common.Translator[component.Config] { + return NewDefaultTranslatorWithName("", factory) +} + +func NewDefaultTranslatorWithName(name string, factory exporter.Factory) common.Translator[component.Config] { + return &translator{name, factory} +} + +func (t *translator) Translate(*confmap.Conf) (component.Config, error) { + return t.factory.CreateDefaultConfig(), nil +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} diff --git a/translator/translate/otel/exporter/translator_test.go b/translator/translate/otel/exporter/translator_test.go new file mode 100644 index 0000000000..d8946a3850 --- /dev/null +++ b/translator/translate/otel/exporter/translator_test.go @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package exporter + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/exporter/exportertest" +) + +func TestTranslator(t *testing.T) { + factory := exportertest.NewNopFactory() + got := NewDefaultTranslator(factory) + require.Equal(t, "nop", got.ID().String()) + cfg, err := got.Translate(nil) + require.NoError(t, err) + require.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/translator/translate/otel/extension/resourcestore/translator.go b/translator/translate/otel/extension/resourcestore/translator.go new file mode 100644 index 0000000000..9367383b26 --- /dev/null +++ b/translator/translate/otel/extension/resourcestore/translator.go @@ -0,0 +1,42 @@ +// Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory extension.Factory +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator() common.Translator[component.Config] { + return &translator{ + factory: resourcestore.NewFactory(), + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +// Translate creates an extension configuration. +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*resourcestore.Config) + cfg.Mode = context.CurrentContext().Mode() + credentials := confmap.NewFromStringMap(agent.Global_Config.Credentials) + _ = credentials.Unmarshal(cfg) + + return cfg, nil +} diff --git a/translator/translate/otel/extension/resourcestore/translator_test.go b/translator/translate/otel/extension/resourcestore/translator_test.go new file mode 100644 index 0000000000..d9ace3c4ed --- /dev/null +++ b/translator/translate/otel/extension/resourcestore/translator_test.go @@ -0,0 +1,62 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcestore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + translateagent "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" +) + +func TestTranslate(t *testing.T) { + context.CurrentContext().SetMode(config.ModeEC2) + translateagent.Global_Config.Credentials = make(map[string]interface{}) + testCases := map[string]struct { + input map[string]interface{} + file_exists bool + profile_exists bool + want *resourcestore.Config + }{ + "OnlyProfile": { + input: map[string]interface{}{}, + profile_exists: true, + want: &resourcestore.Config{ + Mode: config.ModeEC2, + Profile: "test_profile", + }, + }, + "OnlyFile": { + input: map[string]interface{}{}, + file_exists: true, + want: &resourcestore.Config{ + Mode: config.ModeEC2, + Filename: "test_file", + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "" + translateagent.Global_Config.Credentials[translateagent.CredentialsSectionKey] = "" + if testCase.file_exists { + translateagent.Global_Config.Credentials[translateagent.CredentialsFile_Key] = "test_file" + } + if testCase.profile_exists { + translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "test_profile" + } + tt := NewTranslator().(*translator) + assert.Equal(t, "resourcestore", tt.ID().String()) + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } +} diff --git a/translator/translate/otel/pipeline/nop/translator.go b/translator/translate/otel/pipeline/nop/translator.go new file mode 100644 index 0000000000..0efd23b470 --- 
/dev/null +++ b/translator/translate/otel/pipeline/nop/translator.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package nop + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/exporter/nopexporter" + "go.opentelemetry.io/collector/receiver/nopreceiver" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver" +) + +const ( + pipelineName = "nop" +) + +var ( + traceKey = common.ConfigKey(common.TracesKey) + metricKey = common.ConfigKey(common.MetricsKey) + emfKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) + logAgentKey = common.ConfigKey(common.LogsKey, common.LogsCollectedKey) +) + +type translator struct { +} + +var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) + +func NewTranslator() common.Translator[*common.ComponentTranslators] { + return &translator{} +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(component.DataTypeMetrics, pipelineName) +} + +func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { + if conf == nil || !conf.IsSet(logAgentKey) { + return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: fmt.Sprint(logAgentKey)} + } + + translators := &common.ComponentTranslators{ + Receivers: common.NewTranslatorMap(receiver.NewDefaultTranslator(nopreceiver.NewFactory())), + Processors: common.NewTranslatorMap[component.Config](), + Exporters: common.NewTranslatorMap(exporter.NewDefaultTranslator(nopexporter.NewFactory())), + Extensions: common.NewTranslatorMap[component.Config](), + } + return translators, nil +} diff --git a/translator/translate/otel/pipeline/nop/translator_test.go 
b/translator/translate/otel/pipeline/nop/translator_test.go new file mode 100644 index 0000000000..22c30371dd --- /dev/null +++ b/translator/translate/otel/pipeline/nop/translator_test.go @@ -0,0 +1,89 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package nop + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +func TestTranslator(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator() + assert.EqualValues(t, "metrics/nop", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithoutKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithMetricsKey": { + input: map[string]interface{}{ + "metrics": map[string]interface{}{}, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithTracesKey": { + input: map[string]interface{}{ + "traces": map[string]interface{}{}, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithEMFKey": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{}, + }, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithLogsKey": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "logs_collected": map[string]interface{}{ + "files": nil, + }, + }, + }, + want: &want{ + receivers: []string{"nop"}, + processors: []string{}, + exporters: 
[]string{"nop"}, + extensions: []string{}, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} diff --git a/translator/translate/otel/receiver/translator.go b/translator/translate/otel/receiver/translator.go new file mode 100644 index 0000000000..c07b95a3c6 --- /dev/null +++ b/translator/translate/otel/receiver/translator.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package receiver + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/receiver" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory receiver.Factory +} + +func NewDefaultTranslator(factory receiver.Factory) common.Translator[component.Config] { + return NewDefaultTranslatorWithName("", factory) +} + +func NewDefaultTranslatorWithName(name string, factory receiver.Factory) common.Translator[component.Config] { + return &translator{name, factory} +} + +func (t *translator) Translate(*confmap.Conf) (component.Config, error) { + return t.factory.CreateDefaultConfig(), nil +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} diff --git a/translator/translate/otel/receiver/translator_test.go b/translator/translate/otel/receiver/translator_test.go new file mode 100644 index 0000000000..543392b693 --- /dev/null +++ b/translator/translate/otel/receiver/translator_test.go @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package receiver + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestTranslator(t *testing.T) { + factory := receivertest.NewNopFactory() + got := NewDefaultTranslator(factory) + require.Equal(t, "nop", got.ID().String()) + cfg, err := got.Translate(nil) + require.NoError(t, err) + require.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index 79649dfa45..152364befd 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -21,11 +21,13 @@ import ( receiverAdapter "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/resourcestore" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/applicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/emf_logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/host" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/nop" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/xray" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/adapter" @@ -78,9 +80,14 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { ) translators.Merge(registry) pipelines, err := pipeline.NewTranslator(translators).Translate(conf) - if err 
!= nil { - return nil, err + if pipelines == nil { + translators.Set(nop.NewTranslator()) + pipelines, err = pipeline.NewTranslator(translators).Translate(conf) + if err != nil { + return nil, err + } } + pipelines.Translators.Extensions.Set(resourcestore.NewTranslator()) cfg := &otelcol.Config{ Receivers: map[component.ID]component.Config{}, Exporters: map[component.ID]component.Config{}, From 80eb438ed85474e1a582aa41b4a7541ae424cd8a Mon Sep 17 00:00:00 2001 From: Ben Strauss <81588812+straussb@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:21:17 -0400 Subject: [PATCH 35/55] Add support for associating log group names with service/environment names, for RID creation. (#734) Customers may set the `aws.log.group.names` resource attribute when sending Application Signals (AS) telemetry to CloudWatch Agent, to indicate that the AS service writes logs to those log group(s). We want to link the AS service metrics and those logs together in the CloudWatch console/APIs. To enable this, when CWA uploads logs to those log groups, it should include the AS service/environment name in the entity. This commit includes the following major changes to address the issue: * Adds a new awsentityprocessor which extracts the `aws.log.group.names`, `service.name`, and `deployment.environment` from incoming OTel resource attributes and adds the associations to the ResourceStore. * Adds a new `AddServiceAttrEntryForLogGroup()` method to the ResourceStore for the processor to call. * Have `tailerSrc` pass its log group to `CreateLogFileRID()`, so the ResourceStore can look up the association. The following changes were made to support the major pieces of functionality: * Refactoring: introduced an interface for the serviceprovider to implement, in order to facilitate unit testing of ResourceStore code in isolation from serviceprovider implementation. 
* Passed `ec2Info` to the serviceprovider so that it can use the auto-scaling group when constructing the environment name (which is part of the ServiceAttribute). * Have `CreateLogFileRID()` take in a log group name now. * Introduced `LogFileGlob` and `LogGroupName` type aliases for `string`, to prevent accidentally passing one when you meant the other. (Most useful in the `CreateLogFileRID()` method which takes both.) * Only add instance ID and ASG name to the attribute map if our current mode is EC2. * Added two new ServiceNameSources. * Rename `serviceprovider.ServiceAttribute()` to `logFileServiceAttribute()`. And one other miscellaneous changes was included as well: * Rename the ServiceNameSource attribute name from `"AWS.Internal.ServiceNameSource"` to `"AWS.ServiceNameSource"`, in order to save some bytes on the wire. --- extension/resourcestore/extension.go | 75 +++-- extension/resourcestore/extension_test.go | 256 +++++++++++------- extension/resourcestore/serviceprovider.go | 181 ++++++++++--- .../resourcestore/serviceprovider_test.go | 195 +++++++++---- plugins/inputs/logfile/logfile.go | 3 +- plugins/inputs/logfile/tailersrc.go | 2 +- plugins/outputs/cloudwatchlogs/pusher.go | 1 + plugins/processors/awsentity/config.go | 17 ++ plugins/processors/awsentity/config_test.go | 19 ++ plugins/processors/awsentity/factory.go | 50 ++++ plugins/processors/awsentity/factory_test.go | 45 +++ plugins/processors/awsentity/processor.go | 66 +++++ .../processors/awsentity/processor_test.go | 146 ++++++++++ service/defaultcomponents/components.go | 2 + service/defaultcomponents/components_test.go | 8 +- .../appsignals_and_eks_config.yaml | 2 + .../appsignals_and_k8s_config.yaml | 2 + .../appsignals_fallback_and_eks_config.yaml | 2 + .../appsignals_over_fallback_config.yaml | 2 + .../sampleConfig/base_appsignals_config.yaml | 2 + .../base_appsignals_fallback_config.yaml | 2 + .../pipeline/applicationsignals/translator.go | 4 + .../applicationsignals/translator_test.go | 
6 +- .../otel/processor/awsentity/translator.go | 33 +++ 24 files changed, 906 insertions(+), 215 deletions(-) create mode 100644 plugins/processors/awsentity/config.go create mode 100644 plugins/processors/awsentity/config_test.go create mode 100644 plugins/processors/awsentity/factory.go create mode 100644 plugins/processors/awsentity/factory_test.go create mode 100644 plugins/processors/awsentity/processor.go create mode 100644 plugins/processors/awsentity/processor_test.go create mode 100644 translator/translate/otel/processor/awsentity/translator.go diff --git a/extension/resourcestore/extension.go b/extension/resourcestore/extension.go index 0992349afa..96277654e3 100644 --- a/extension/resourcestore/extension.go +++ b/extension/resourcestore/extension.go @@ -27,18 +27,18 @@ const ( Service = "Service" InstanceIDKey = "EC2.InstanceId" ASGKey = "EC2.AutoScalingGroup" - ServiceNameSourceKey = "AWS.Internal.ServiceNameSource" + ServiceNameSourceKey = "AWS.ServiceNameSource" PlatformType = "PlatformType" EC2PlatForm = "AWS::EC2" ) type ec2ProviderType func(string, *configaws.CredentialConfig) ec2iface.EC2API -type ServiceNameProvider interface { - ServiceName() - startServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider) - getIAMRole(metadataProvider ec2metadataprovider.MetadataProvider) - getEC2Tags(ec2API ec2iface.EC2API) +type serviceProviderInterface interface { + startServiceProvider() + addEntryForLogFile(LogFileGlob, ServiceAttribute) + addEntryForLogGroup(LogGroupName, ServiceAttribute) + logFileServiceAttribute(LogFileGlob, LogGroupName) ServiceAttribute } type eksInfo struct { @@ -62,7 +62,7 @@ type ResourceStore struct { // serviceprovider stores information about possible service names // that we can attach to the resource ID - serviceprovider serviceprovider + serviceprovider serviceProviderInterface // nativeCredential stores the credential config for agent's native // component such as LogAgent @@ -91,7 +91,7 @@ func (r 
*ResourceStore) Start(ctx context.Context, host component.Host) error { r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) go r.ec2Info.initEc2Info() } - r.serviceprovider = *newServiceProvider(r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) + r.serviceprovider = newServiceProvider(r.mode, &r.ec2Info, r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) go r.serviceprovider.startServiceProvider() return nil } @@ -121,32 +121,51 @@ func (r *ResourceStore) NativeCredentialExists() bool { return r.nativeCredential != nil } -func (r *ResourceStore) CreateLogFileRID(fileGlobPath string, filePath string) *cloudwatchlogs.Resource { - if r.shouldReturnRID() { - return &cloudwatchlogs.Resource{ - AttributeMaps: []map[string]*string{ - r.createAttributeMaps(), - }, - KeyAttributes: r.createServiceKeyAttributes(fileGlobPath), - } +// CreateLogFileRID creates the RID for log events that are being uploaded from a log file in the environment. 
+func (r *ResourceStore) CreateLogFileRID(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Resource { + if !r.shouldReturnRID() { + return nil + } + + serviceAttr := r.serviceprovider.logFileServiceAttribute(logFileGlob, logGroupName) + + keyAttributes := r.createServiceKeyAttributes(serviceAttr) + attributeMap := r.createAttributeMap() + addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) + + return &cloudwatchlogs.Resource{ + KeyAttributes: keyAttributes, + AttributeMaps: []map[string]*string{attributeMap}, } - return nil } -// AddServiceAttrEntryToResourceStore adds an entry to the resource store for the provided file -> serviceName, environmentName key-value pair -func (r *ResourceStore) AddServiceAttrEntryToResourceStore(fileGlob string, serviceName string, environmentName string) { - if r.serviceprovider.logFiles != nil { - r.serviceprovider.logFiles[fileGlob] = ServiceAttribute{ServiceName: serviceName, ServiceNameSource: AgentConfig, Environment: environmentName} +// AddServiceAttrEntryForLogFile adds an entry to the resource store for the provided file glob -> (serviceName, environmentName) key-value pair +func (r *ResourceStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { + if r.serviceprovider != nil { + r.serviceprovider.addEntryForLogFile(fileGlob, ServiceAttribute{ + ServiceName: serviceName, + ServiceNameSource: ServiceNameSourceUserConfiguration, + Environment: environmentName, + }) } } -func (r *ResourceStore) createAttributeMaps() map[string]*string { - serviceAttr := r.serviceprovider.ServiceAttribute("") +// AddServiceAttrEntryForLogGroup adds an entry to the resource store for the provided log group nme -> (serviceName, environmentName) key-value pair +func (r *ResourceStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, serviceName string, environmentName string) { + r.serviceprovider.addEntryForLogGroup(logGroupName, 
ServiceAttribute{ + ServiceName: serviceName, + ServiceNameSource: ServiceNameSourceInstrumentation, + Environment: environmentName, + }) +} + +func (r *ResourceStore) createAttributeMap() map[string]*string { attributeMap := make(map[string]*string) - addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) - addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) - addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) + if r.mode == config.ModeEC2 { + addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) + addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) + } switch r.mode { case config.ModeEC2: attributeMap[PlatformType] = aws.String(EC2PlatForm) @@ -154,8 +173,8 @@ func (r *ResourceStore) createAttributeMaps() map[string]*string { return attributeMap } -func (r *ResourceStore) createServiceKeyAttributes(fileGlob string) *cloudwatchlogs.KeyAttributes { - serviceAttr := r.serviceprovider.ServiceAttribute(fileGlob) +// createServiceKeyAttribute creates KeyAttributes for Service resources +func (r *ResourceStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) *cloudwatchlogs.KeyAttributes { serviceKeyAttr := &cloudwatchlogs.KeyAttributes{ Type: aws.String(Service), } diff --git a/extension/resourcestore/extension_test.go b/extension/resourcestore/extension_test.go index a1df099b2e..99bc920dd7 100644 --- a/extension/resourcestore/extension_test.go +++ b/extension/resourcestore/extension_test.go @@ -17,23 +17,52 @@ import ( "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) -type mockMetadataProvider struct { - InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument - Tags string - TagValue string +type mockServiceProvider 
struct { + mock.Mock +} + +func (s *mockServiceProvider) startServiceProvider() {} + +func (s *mockServiceProvider) addEntryForLogGroup(logGroupName LogGroupName, serviceAttr ServiceAttribute) { + s.Called(logGroupName, serviceAttr) +} + +func (s *mockServiceProvider) addEntryForLogFile(logFileGlob LogFileGlob, serviceAttr ServiceAttribute) { + s.Called(logFileGlob, serviceAttr) +} + +func (s *mockServiceProvider) logFileServiceAttribute(glob LogFileGlob, name LogGroupName) ServiceAttribute { + args := s.Called(glob, name) + return args.Get(0).(ServiceAttribute) } type mockSTSClient struct { stsiface.STSAPI + accountId string } func (ms *mockSTSClient) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) { - return &sts.GetCallerIdentityOutput{Account: aws.String("123456789")}, nil + return &sts.GetCallerIdentityOutput{Account: aws.String(ms.accountId)}, nil +} + +type mockMetadataProvider struct { + InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument + Tags string + TagValue string +} + +func mockMetadataProviderWithAccountId(accountId string) *mockMetadataProvider { + return &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + AccountID: accountId, + }, + } } func (m *mockMetadataProvider) Get(ctx context.Context) (ec2metadata.EC2InstanceIdentityDocument, error) { @@ -93,32 +122,6 @@ func TestResourceStore_EC2Info(t *testing.T) { } } -func TestResourceStore_LogFiles(t *testing.T) { - tests := []struct { - name string - logFileInput map[string]ServiceAttribute - want map[string]ServiceAttribute - }{ - { - name: "happypath", - logFileInput: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {"cloudwatch-agent", "", "ec2:test"}}, - want: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {"cloudwatch-agent", "", "ec2:test"}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t 
*testing.T) { - r := &ResourceStore{ - serviceprovider: serviceprovider{ - logFiles: tt.logFileInput, - }, - } - if got := r.serviceprovider.logFiles; !reflect.DeepEqual(got, tt.want) { - t.Errorf("logFiles() = %v, want %v", got, tt.want) - } - }) - } -} - func TestResourceStore_Mode(t *testing.T) { tests := []struct { name string @@ -165,9 +168,8 @@ func Test_getRegion(t *testing.T) { func TestResourceStore_createAttributeMaps(t *testing.T) { type fields struct { - ec2Info ec2Info - serviceprovider serviceprovider - mode string + ec2Info ec2Info + mode string } tests := []struct { name string @@ -175,41 +177,35 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { want map[string]*string }{ { - name: "HappyPath_IAMRole", + name: "HappyPath", fields: fields{ ec2Info: ec2Info{ InstanceID: "i-123456789", AutoScalingGroup: "test-asg", }, - serviceprovider: serviceprovider{ - iamRole: "test-role", - }, + mode: config.ModeEC2, }, want: map[string]*string{ - ServiceNameSourceKey: aws.String(ClientIamRole), - ASGKey: aws.String("test-asg"), - InstanceIDKey: aws.String("i-123456789"), + ASGKey: aws.String("test-asg"), + InstanceIDKey: aws.String("i-123456789"), + PlatformType: aws.String(EC2PlatForm), }, }, { - name: "HappyPath_TagServiceName", + name: "HappyPath_AsgMissing", fields: fields{ ec2Info: ec2Info{ - InstanceID: "i-123456789", - AutoScalingGroup: "test-asg", - }, - serviceprovider: serviceprovider{ - ec2TagServiceName: "test-tag-service", + InstanceID: "i-123456789", }, + mode: config.ModeEC2, }, want: map[string]*string{ - ServiceNameSourceKey: aws.String(ResourceTags), - ASGKey: aws.String("test-asg"), - InstanceIDKey: aws.String("i-123456789"), + InstanceIDKey: aws.String("i-123456789"), + PlatformType: aws.String(EC2PlatForm), }, }, { - name: "HappyPath_TagServiceName", + name: "HappyPath_InstanceIdAndAsgMissing", fields: fields{ mode: config.ModeEC2, }, @@ -217,51 +213,113 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { PlatformType: 
aws.String(EC2PlatForm), }, }, + { + name: "NonEC2", + fields: fields{ + ec2Info: ec2Info{ + InstanceID: "i-123456789", + AutoScalingGroup: "test-asg", + }, + mode: config.ModeOnPrem, + }, + want: map[string]*string{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := &ResourceStore{ - ec2Info: tt.fields.ec2Info, - serviceprovider: tt.fields.serviceprovider, - mode: tt.fields.mode, + ec2Info: tt.fields.ec2Info, + mode: tt.fields.mode, } - assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createAttributeMaps()), "createAttributeMaps()") + assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createAttributeMap()), "createAttributeMap()") }) } } func TestResourceStore_createServiceKeyAttributes(t *testing.T) { - type fields struct { - serviceprovider serviceprovider - } tests := []struct { - name string - fields fields - want *cloudwatchlogs.KeyAttributes + name string + serviceAttr ServiceAttribute + want *cloudwatchlogs.KeyAttributes }{ { - name: "HappyPath_", - fields: fields{ - serviceprovider: serviceprovider{ - iamRole: "test-role", - }, + name: "NameAndEnvironmentSet", + serviceAttr: ServiceAttribute{ServiceName: "test-service", Environment: "test-environment"}, + want: &cloudwatchlogs.KeyAttributes{ + Environment: aws.String("test-environment"), + Name: aws.String("test-service"), + Type: aws.String(Service), }, + }, + { + name: "OnlyNameSet", + serviceAttr: ServiceAttribute{ServiceName: "test-service"}, want: &cloudwatchlogs.KeyAttributes{ - Name: aws.String("test-role"), + Name: aws.String("test-service"), Type: aws.String(Service), }, }, + { + name: "OnlyEnvironmentSet", + serviceAttr: ServiceAttribute{Environment: "test-environment"}, + want: &cloudwatchlogs.KeyAttributes{ + Environment: aws.String("test-environment"), + Type: aws.String(Service), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ - serviceprovider: tt.fields.serviceprovider, - } - assert.Equalf(t, 
tt.want, r.createServiceKeyAttributes(""), "createServiceKeyAttributes()") + r := &ResourceStore{} + assert.Equalf(t, tt.want, r.createServiceKeyAttributes(tt.serviceAttr), "createServiceKeyAttributes()") }) } } +func TestResourceStore_createLogFileRID(t *testing.T) { + instanceId := "i-abcd1234" + accountId := "123456789012" + glob := LogFileGlob("glob") + group := LogGroupName("group") + serviceAttr := ServiceAttribute{ + ServiceName: "test-service", + ServiceNameSource: ServiceNameSourceUserConfiguration, + Environment: "test-environment", + } + sp := new(mockServiceProvider) + sp.On("logFileServiceAttribute", glob, group).Return(serviceAttr) + rs := ResourceStore{ + mode: config.ModeEC2, + ec2Info: ec2Info{InstanceID: instanceId}, + serviceprovider: sp, + metadataprovider: mockMetadataProviderWithAccountId(accountId), + stsClient: &mockSTSClient{accountId: accountId}, + nativeCredential: &session.Session{}, + } + + resource := rs.CreateLogFileRID(glob, group) + + expectedResource := cloudwatchlogs.Resource{ + KeyAttributes: &cloudwatchlogs.KeyAttributes{ + Environment: aws.String("test-environment"), + Name: aws.String("test-service"), + Type: aws.String(Service), + }, + AttributeMaps: []map[string]*string{ + { + InstanceIDKey: aws.String(instanceId), + ServiceNameSourceKey: aws.String(ServiceNameSourceUserConfiguration), + PlatformType: aws.String(EC2PlatForm), + }, + }, + } + assert.Equal(t, *expectedResource.KeyAttributes.Environment, *resource.KeyAttributes.Environment) + assert.Equal(t, *expectedResource.KeyAttributes.Name, *resource.KeyAttributes.Name) + assert.Equal(t, *expectedResource.KeyAttributes.Type, *resource.KeyAttributes.Type) + assert.Len(t, resource.AttributeMaps, 1) + assert.Equal(t, dereferenceMap(expectedResource.AttributeMaps[0]), dereferenceMap(resource.AttributeMaps[0])) +} + func TestResourceStore_shouldReturnRID(t *testing.T) { type fields struct { metadataprovider ec2metadataprovider.MetadataProvider @@ -273,14 +331,12 @@ func 
TestResourceStore_shouldReturnRID(t *testing.T) { fields fields want bool }{ + // TODO need tests for when you can't fetch from IMDS or STS (fail closed) { name: "HappyPath_AccountIDMatches", fields: fields{ - metadataprovider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - AccountID: "123456789"}, - }, - stsClient: &mockSTSClient{}, + metadataprovider: mockMetadataProviderWithAccountId("123456789012"), + stsClient: &mockSTSClient{accountId: "123456789012"}, nativeCredential: &session.Session{}, }, want: true, @@ -288,11 +344,8 @@ func TestResourceStore_shouldReturnRID(t *testing.T) { { name: "HappyPath_AccountIDMismatches", fields: fields{ - metadataprovider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - AccountID: "987654321"}, - }, - stsClient: &mockSTSClient{}, + metadataprovider: mockMetadataProviderWithAccountId("210987654321"), + stsClient: &mockSTSClient{accountId: "123456789012"}, nativeCredential: &session.Session{}, }, want: false, @@ -322,23 +375,34 @@ func dereferenceMap(input map[string]*string) map[string]string { return result } -func TestAddServiceKeyAttributeToLogFilesMap(t *testing.T) { - rs := &ResourceStore{ - metadataprovider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - AccountID: "987654321"}, - }, - serviceprovider: serviceprovider{logFiles: map[string]ServiceAttribute{}}, +func TestResourceStore_addServiceAttrEntryForLogFile(t *testing.T) { + sp := new(mockServiceProvider) + rs := ResourceStore{serviceprovider: sp} + + key := LogFileGlob("/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log") + serviceAttr := ServiceAttribute{ + ServiceName: "test", + ServiceNameSource: ServiceNameSourceUserConfiguration, + Environment: "ec2:test", } - key := "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log" - rs.AddServiceAttrEntryToResourceStore(key, "test", "ec2:test") + 
sp.On("addEntryForLogFile", key, serviceAttr).Return() + rs.AddServiceAttrEntryForLogFile(key, "test", "ec2:test") - expected := &ResourceStore{ - serviceprovider: serviceprovider{ - iamRole: "test-role", - logFiles: map[string]ServiceAttribute{"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "test", ServiceNameSource: AgentConfig, Environment: "ec2:test"}}, - }, + sp.AssertExpectations(t) +} + +func TestResourceStore_addServiceAttrEntryForLogGroup(t *testing.T) { + sp := new(mockServiceProvider) + rs := ResourceStore{serviceprovider: sp} + + key := LogGroupName("TestLogGroup") + serviceAttr := ServiceAttribute{ + ServiceName: "test", + ServiceNameSource: ServiceNameSourceInstrumentation, + Environment: "ec2:test", } + sp.On("addEntryForLogGroup", key, serviceAttr).Return() + rs.AddServiceAttrEntryForLogGroup(key, "test", "ec2:test") - assert.Equal(t, true, reflect.DeepEqual(rs.serviceprovider.logFiles, expected.serviceprovider.logFiles)) + sp.AssertExpectations(t) } diff --git a/extension/resourcestore/serviceprovider.go b/extension/resourcestore/serviceprovider.go index c576bcf4b4..01918aa9e6 100644 --- a/extension/resourcestore/serviceprovider.go +++ b/extension/resourcestore/serviceprovider.go @@ -20,6 +20,7 @@ import ( configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) const ( @@ -27,11 +28,19 @@ const ( SERVICE = "service" APPLICATION = "application" APP = "app" - ClientIamRole = "ClientIamRole" - ResourceTags = "ResourceTags" - jitterMax = 180 - jitterMin = 60 - AgentConfig = "AgentConfig" + + // Matches the default value from OTel + // https://opentelemetry.io/docs/languages/sdk-configuration/general/#otel_service_name + ServiceNameUnknown = "unknown_service" + + ServiceNameSourceClientIamRole = "ClientIamRole" + 
ServiceNameSourceInstrumentation = "Instrumentation" + ServiceNameSourceResourceTags = "ResourceTags" + ServiceNameSourceUnknown = "Unknown" + ServiceNameSourceUserConfiguration = "UserConfiguration" + + jitterMax = 180 + jitterMin = 60 ) var ( @@ -48,7 +57,12 @@ type ServiceAttribute struct { Environment string } +type LogGroupName string +type LogFileGlob string + type serviceprovider struct { + mode string + ec2Info *ec2Info metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API ec2Provider ec2ProviderType @@ -57,12 +71,15 @@ type serviceprovider struct { ec2TagServiceName string done chan struct{} - // logFiles is a variable reserved for communication between OTEL components and LogAgent - // in order to achieve process correlations where the key is the log file path and the value - // is the service name + // logFiles stores the service attributes that were configured for log files in CloudWatch Agent configuration. // Example: - // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": "cloudwatch-agent" - logFiles map[string]ServiceAttribute + // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "cloudwatch-agent"} + logFiles map[LogFileGlob]ServiceAttribute + + // logGroups stores the associations between log groups and service attributes that were observed from incoming + // telemetry. Example: + // "MyLogGroup": {ServiceName: "MyInstrumentedService"} + logGroups map[LogGroupName]ServiceAttribute } func (s *serviceprovider) startServiceProvider() { @@ -74,34 +91,120 @@ func (s *serviceprovider) startServiceProvider() { go refreshLoop(s.done, s.getEC2TagServiceName, false) } -// ServiceAttribute function gets the relevant service attributes +// addEntryForLogFile adds an association between a log file glob and a service attribute, as configured in the +// CloudWatch Agent config. 
+func (s *serviceprovider) addEntryForLogFile(logFileGlob LogFileGlob, serviceAttr ServiceAttribute) { + s.logFiles[logFileGlob] = serviceAttr +} + +// addEntryForLogGroup adds an association between a log group name and a service attribute, as observed from incoming +// telemetry received by CloudWatch Agent. +func (s *serviceprovider) addEntryForLogGroup(logGroupName LogGroupName, serviceAttr ServiceAttribute) { + s.logGroups[logGroupName] = serviceAttr +} + +type serviceAttributeProvider func() ServiceAttribute + +// mergeServiceAttributes takes in a list of functions that create ServiceAttributes, in descending priority order +// (highest priority first), and proceeds down the list until we have obtained both a ServiceName and an +// EnvironmentName. +func mergeServiceAttributes(providers []serviceAttributeProvider) ServiceAttribute { + ret := ServiceAttribute{} + + for _, provider := range providers { + serviceAttr := provider() + + if ret.ServiceName == "" { + ret.ServiceName = serviceAttr.ServiceName + ret.ServiceNameSource = serviceAttr.ServiceNameSource + } + if ret.Environment == "" { + ret.Environment = serviceAttr.Environment + } + + if ret.ServiceName != "" && ret.Environment != "" { + return ret + } + } + + return ret +} + +// logFileServiceAttribute function gets the relevant service attributes // service name is retrieved based on the following priority chain // 1. Incoming telemetry attributes // 2. CWA config -// 3. Process correlation -// 4. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app -// 5. 
IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) -func (s *serviceprovider) ServiceAttribute(fileGlob string) ServiceAttribute { - serviceAttr := ServiceAttribute{} - // CWA config - if val, ok := s.logFiles[fileGlob]; ok { - serviceAttr.ServiceName = val.ServiceName - serviceAttr.ServiceNameSource = val.ServiceNameSource - serviceAttr.Environment = val.Environment - } - // Instance Tags - if s.ec2TagServiceName != "" && serviceAttr.ServiceName == "" { - serviceAttr.ServiceName = s.ec2TagServiceName - serviceAttr.ServiceNameSource = ResourceTags - return serviceAttr - } - //IAM Role - if s.iamRole != "" && serviceAttr.ServiceName == "" { - serviceAttr.ServiceName = s.iamRole - serviceAttr.ServiceNameSource = ClientIamRole - return serviceAttr - } - return serviceAttr +// 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app +// 4. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) +func (s *serviceprovider) logFileServiceAttribute(logFile LogFileGlob, logGroup LogGroupName) ServiceAttribute { + return mergeServiceAttributes([]serviceAttributeProvider{ + func() ServiceAttribute { return s.serviceAttributeForLogGroup(logGroup) }, + func() ServiceAttribute { return s.serviceAttributeForLogFile(logFile) }, + s.serviceAttributeFromEc2Tags, + s.serviceAttributeFromIamRole, + s.serviceAttributeFromAsg, + s.serviceAttributeFallback, + }) +} + +func (s *serviceprovider) serviceAttributeForLogGroup(logGroup LogGroupName) ServiceAttribute { + if logGroup == "" { + return ServiceAttribute{} + } + + return s.logGroups[logGroup] +} + +func (s *serviceprovider) serviceAttributeForLogFile(logFile LogFileGlob) ServiceAttribute { + if logFile == "" { + return ServiceAttribute{} + } + + return s.logFiles[logFile] +} + +func (s *serviceprovider) serviceAttributeFromEc2Tags() ServiceAttribute { + if s.ec2TagServiceName == "" { + return ServiceAttribute{} + 
} + + return ServiceAttribute{ + ServiceName: s.ec2TagServiceName, + ServiceNameSource: ServiceNameSourceResourceTags, + } +} + +func (s *serviceprovider) serviceAttributeFromIamRole() ServiceAttribute { + if s.iamRole == "" { + return ServiceAttribute{} + } + + return ServiceAttribute{ + ServiceName: s.iamRole, + ServiceNameSource: ServiceNameSourceClientIamRole, + } +} + +func (s *serviceprovider) serviceAttributeFromAsg() ServiceAttribute { + if s.ec2Info == nil || s.ec2Info.AutoScalingGroup == "" { + return ServiceAttribute{} + } + + return ServiceAttribute{ + Environment: "ec2:" + s.ec2Info.AutoScalingGroup, + } +} + +func (s *serviceprovider) serviceAttributeFallback() ServiceAttribute { + attr := ServiceAttribute{ + ServiceName: ServiceNameUnknown, + ServiceNameSource: ServiceNameSourceUnknown, + } + if s.mode == config.ModeEC2 { + attr.Environment = "ec2:default" + } + + return attr } func (s *serviceprovider) getIAMRole() error { @@ -193,13 +296,17 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func newServiceProvider(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) *serviceprovider { +func newServiceProvider(mode string, ec2Info *ec2Info, metadataProvider ec2metadataprovider.MetadataProvider, + providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { return &serviceprovider{ + mode: mode, + ec2Info: ec2Info, metadataProvider: metadataProvider, ec2Provider: providerType, ec2Credential: ec2Credential, done: done, - logFiles: map[string]ServiceAttribute{}, + logFiles: make(map[LogFileGlob]ServiceAttribute), + logGroups: make(map[LogGroupName]ServiceAttribute), } } diff --git a/extension/resourcestore/serviceprovider_test.go b/extension/resourcestore/serviceprovider_test.go index 7b0cc98c2d..b40180c811 100644 --- 
a/extension/resourcestore/serviceprovider_test.go +++ b/extension/resourcestore/serviceprovider_test.go @@ -15,6 +15,7 @@ import ( configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) type mockServiceNameEC2Client struct { @@ -81,68 +82,170 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { } } -func Test_serviceprovider_ServiceAttribute(t *testing.T) { - type fields struct { - iamRole string - ec2TagServiceName string - logFiles map[string]ServiceAttribute +func Test_serviceprovider_addEntryForLogFile(t *testing.T) { + s := &serviceprovider{ + logFiles: make(map[LogFileGlob]ServiceAttribute), + } + glob := LogFileGlob("glob") + serviceAttr := ServiceAttribute{ServiceName: "test-service"} + + s.addEntryForLogFile(glob, serviceAttr) + + actual := s.logFiles[glob] + assert.Equal(t, serviceAttr, actual) +} + +func Test_serviceprovider_addEntryForLogGroup(t *testing.T) { + s := &serviceprovider{ + logGroups: make(map[LogGroupName]ServiceAttribute), + } + group := LogGroupName("group") + serviceAttr := ServiceAttribute{ServiceName: "test-service"} + + s.addEntryForLogGroup(group, serviceAttr) + + actual := s.logGroups[group] + assert.Equal(t, serviceAttr, actual) +} + +func Test_serviceprovider_mergeServiceAttributes(t *testing.T) { + onlySvc1 := func() ServiceAttribute { + return ServiceAttribute{ServiceName: "service1", ServiceNameSource: "source1"} + } + onlySvc2 := func() ServiceAttribute { + return ServiceAttribute{ServiceName: "service2", ServiceNameSource: "source2"} + } + onlyEnv1 := func() ServiceAttribute { return ServiceAttribute{Environment: "environment1"} } + onlyEnv2 := func() ServiceAttribute { return ServiceAttribute{Environment: "environment2"} } + both2 := func() ServiceAttribute { + return ServiceAttribute{ServiceName: "service2", ServiceNameSource: "source2", Environment: "environment2"} + } + 
both3 := func() ServiceAttribute { + return ServiceAttribute{ServiceName: "service3", ServiceNameSource: "source3", Environment: "environment3"} } + empty := func() ServiceAttribute { return ServiceAttribute{} } + tests := []struct { - name string - fields fields - serviceProvider *serviceprovider - want ServiceAttribute + name string + providers []serviceAttributeProvider + want ServiceAttribute }{ { - name: "HappyPath_IAMRole", - fields: fields{ - iamRole: "TestRole", - }, - want: ServiceAttribute{ - ServiceName: "TestRole", - ServiceNameSource: ClientIamRole, - }, + name: "RespectServicePriority", + providers: []serviceAttributeProvider{onlySvc1, onlySvc2}, + want: ServiceAttribute{ServiceName: "service1", ServiceNameSource: "source1"}, }, { - name: "HappyPath_EC2TagServiceName", - fields: fields{ - ec2TagServiceName: "tag-service", - }, - want: ServiceAttribute{ - ServiceName: "tag-service", - ServiceNameSource: ResourceTags, - }, + name: "RespectEnvironmentPriority", + providers: []serviceAttributeProvider{onlyEnv1, onlyEnv2}, + want: ServiceAttribute{Environment: "environment1"}, }, { - name: "HappyPath_AgentConfig", - fields: fields{ - logFiles: map[string]ServiceAttribute{ - "test-file": { - ServiceName: "test-service", - ServiceNameSource: AgentConfig, - Environment: "test-environment", - }, - }, - }, - want: ServiceAttribute{ - ServiceName: "test-service", - ServiceNameSource: AgentConfig, - Environment: "test-environment", - }, + name: "CombineServiceAndEnvironment", + providers: []serviceAttributeProvider{onlySvc1, both2, both3}, + want: ServiceAttribute{ServiceName: "service1", ServiceNameSource: "source1", Environment: "environment2"}, + }, + { + name: "CombineEnvironmentAndService", + providers: []serviceAttributeProvider{onlyEnv1, both2, both3}, + want: ServiceAttribute{ServiceName: "service2", ServiceNameSource: "source2", Environment: "environment1"}, + }, + { + name: "EmptyList", + providers: []serviceAttributeProvider{}, + want: 
ServiceAttribute{}, + }, + { + name: "EmptyProvider", + providers: []serviceAttributeProvider{empty}, + want: ServiceAttribute{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := &serviceprovider{ - iamRole: tt.fields.iamRole, - ec2TagServiceName: tt.fields.ec2TagServiceName, - logFiles: tt.fields.logFiles, - } - assert.Equalf(t, tt.want, s.ServiceAttribute("test-file"), "ServiceAttribute()") + assert.Equalf(t, tt.want, mergeServiceAttributes(tt.providers), "mergeServiceAttributes()") }) } } +func Test_serviceprovider_serviceAttributeForLogGroup(t *testing.T) { + s := &serviceprovider{logGroups: map[LogGroupName]ServiceAttribute{"group": {ServiceName: "test-service"}}} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeForLogGroup("")) + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeForLogGroup("othergroup")) + assert.Equal(t, ServiceAttribute{ServiceName: "test-service"}, s.serviceAttributeForLogGroup("group")) +} + +func Test_serviceprovider_serviceAttributeForLogFile(t *testing.T) { + s := &serviceprovider{logFiles: map[LogFileGlob]ServiceAttribute{"glob": {ServiceName: "test-service"}}} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeForLogFile("")) + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeForLogFile("otherglob")) + assert.Equal(t, ServiceAttribute{ServiceName: "test-service"}, s.serviceAttributeForLogFile("glob")) +} + +func Test_serviceprovider_serviceAttributeFromEc2Tags(t *testing.T) { + s := &serviceprovider{} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromEc2Tags()) + + s = &serviceprovider{ec2TagServiceName: "test-service"} + assert.Equal(t, ServiceAttribute{ServiceName: "test-service", ServiceNameSource: ServiceNameSourceResourceTags}, s.serviceAttributeFromEc2Tags()) +} + +func Test_serviceprovider_serviceAttributeFromIamRole(t *testing.T) { + s := &serviceprovider{} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromIamRole()) + + s = &serviceprovider{iamRole: 
"test-service"} + assert.Equal(t, ServiceAttribute{ServiceName: "test-service", ServiceNameSource: ServiceNameSourceClientIamRole}, s.serviceAttributeFromIamRole()) +} + +func Test_serviceprovider_serviceAttributeFromAsg(t *testing.T) { + s := &serviceprovider{} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromAsg()) + + s = &serviceprovider{ec2Info: &ec2Info{}} + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromAsg()) + + s = &serviceprovider{ec2Info: &ec2Info{AutoScalingGroup: "test-asg"}} + assert.Equal(t, ServiceAttribute{Environment: "ec2:test-asg"}, s.serviceAttributeFromAsg()) +} + +func Test_serviceprovider_serviceAttributeFallback(t *testing.T) { + s := &serviceprovider{} + assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown}, s.serviceAttributeFallback()) + + s = &serviceprovider{mode: config.ModeEC2} + assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown, Environment: "ec2:default"}, s.serviceAttributeFallback()) +} + +func Test_serviceprovider_logFileServiceAttribute(t *testing.T) { + s := &serviceprovider{ + mode: config.ModeEC2, + logGroups: make(map[LogGroupName]ServiceAttribute), + logFiles: make(map[LogFileGlob]ServiceAttribute), + } + + // Start with no known source for service attributes, then set values from the bottom of the priority list upward. + // This way we test the priority order - if we set the highest priority source first (log groups), then we wouldn't + // be able to test that lower priority sources should be used if necessary. 
+ + assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown, Environment: "ec2:default"}, s.logFileServiceAttribute("glob", "group")) + + s.ec2Info = &ec2Info{AutoScalingGroup: "test-asg"} + assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) + + s.iamRole = "test-role" + assert.Equal(t, ServiceAttribute{ServiceName: "test-role", ServiceNameSource: ServiceNameSourceClientIamRole, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) + + s.ec2TagServiceName = "test-service-from-tags" + assert.Equal(t, ServiceAttribute{ServiceName: "test-service-from-tags", ServiceNameSource: ServiceNameSourceResourceTags, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) + + s.logFiles["glob"] = ServiceAttribute{ServiceName: "test-service-from-logfile", ServiceNameSource: ServiceNameSourceUserConfiguration} + assert.Equal(t, ServiceAttribute{ServiceName: "test-service-from-logfile", ServiceNameSource: ServiceNameSourceUserConfiguration, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) + + s.logGroups["group"] = ServiceAttribute{ServiceName: "test-service-from-loggroup", ServiceNameSource: ServiceNameSourceInstrumentation} + assert.Equal(t, ServiceAttribute{ServiceName: "test-service-from-loggroup", ServiceNameSource: ServiceNameSourceInstrumentation, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) +} + func Test_serviceprovider_getIAMRole(t *testing.T) { type fields struct { metadataProvider ec2metadataprovider.MetadataProvider diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index 531fd57c36..e4109da705 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -161,8 +161,9 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { //Add file -> 
{serviceName, deploymentEnvironment} mapping to resource store if rs != nil { - rs.AddServiceAttrEntryToResourceStore(fileconfig.FilePath, fileconfig.ServiceName, fileconfig.Environment) + rs.AddServiceAttrEntryForLogFile(resourcestore.LogFileGlob(fileconfig.FilePath), fileconfig.ServiceName, fileconfig.Environment) } + targetFiles, err := t.getTargetFiles(fileconfig) if err != nil { t.Log.Errorf("Failed to find target files for file config %v, with error: %v", fileconfig.FilePath, err) diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 2f4ea4c168..8a3d566aba 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -173,7 +173,7 @@ func (ts *tailerSrc) AddCleanUpFn(f func()) { func (ts *tailerSrc) ResourceID() *cloudwatchlogs.Resource { rs := resourcestore.GetResourceStore() if rs != nil { - return resourcestore.GetResourceStore().CreateLogFileRID(ts.fileGlobPath, ts.tailer.Filename) + return rs.CreateLogFileRID(resourcestore.LogFileGlob(ts.fileGlobPath), resourcestore.LogGroupName(ts.group)) } return nil } diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 8b4864c265..a39f5769f9 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -224,6 +224,7 @@ func (p *pusher) send() { if p.logSrc != nil { resourceID = p.logSrc.ResourceID() } + input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: p.events, LogGroupName: &p.Group, diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go new file mode 100644 index 0000000000..de7724c13b --- /dev/null +++ b/plugins/processors/awsentity/config.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct{} + +// Verify Config implements Processor interface. +var _ component.Config = (*Config)(nil) + +func (cfg *Config) Validate() error { + return nil +} diff --git a/plugins/processors/awsentity/config_test.go b/plugins/processors/awsentity/config_test.go new file mode 100644 index 0000000000..0605b0f712 --- /dev/null +++ b/plugins/processors/awsentity/config_test.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/plugins/processors/awsentity/factory.go b/plugins/processors/awsentity/factory.go new file mode 100644 index 0000000000..1372032ecb --- /dev/null +++ b/plugins/processors/awsentity/factory.go @@ -0,0 +1,50 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + stability = component.StabilityLevelBeta +) + +var ( + TypeStr, _ = component.NewType("awsentity") + processorCapabilities = consumer.Capabilities{MutatesData: false} +) + +func NewFactory() processor.Factory { + return processor.NewFactory( + TypeStr, + createDefaultConfig, + processor.WithMetrics(createMetricsProcessor, stability)) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createMetricsProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextConsumer consumer.Metrics, +) (processor.Metrics, error) { + metricsProcessor := newAwsEntityProcessor(set.Logger) + + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + nextConsumer, + metricsProcessor.processMetrics, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/plugins/processors/awsentity/factory_test.go b/plugins/processors/awsentity/factory_test.go new file mode 100644 index 0000000000..a00799bc55 --- /dev/null +++ b/plugins/processors/awsentity/factory_test.go @@ -0,0 +1,45 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor/processortest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateProcessor(t *testing.T) { + factory := NewFactory() + require.NotNil(t, factory) + + cfg := factory.CreateDefaultConfig() + setting := processortest.NewNopCreateSettings() + + tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + assert.Nil(t, tProcessor) + + mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.NoError(t, err) + assert.NotNil(t, mProcessor) + + lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + assert.Nil(t, lProcessor) +} diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go new file mode 100644 index 0000000000..aa9f30b262 --- /dev/null +++ b/plugins/processors/awsentity/processor.go @@ -0,0 +1,66 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "context" + "strings" + + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" +) + +const ( + attributeAwsLogGroupNames = "aws.log.group.names" + attributeDeploymentEnvironment = "deployment.environment" + attributeServiceName = "service.name" +) + +// exposed as a variable for unit testing +var addToResourceStore = func(logGroupName resourcestore.LogGroupName, serviceName string, environmentName string) { + rs := resourcestore.GetResourceStore() + if rs == nil { + return + } + rs.AddServiceAttrEntryForLogGroup(logGroupName, serviceName, environmentName) +} + +// awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or +// deployment.environment resource attributes set, then adds the association between the log group(s) and the +// service/environment names to the resourcestore extension. +type awsEntityProcessor struct { + logger *zap.Logger +} + +func newAwsEntityProcessor(logger *zap.Logger) *awsEntityProcessor { + return &awsEntityProcessor{ + logger: logger, + } +} + +func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { + rm := md.ResourceMetrics() + for i := 0; i < rm.Len(); i++ { + resourceAttrs := rm.At(i).Resource().Attributes() + logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) + serviceName, _ := resourceAttrs.Get(attributeServiceName) + environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) + + if logGroupNames.Str() == "" || (serviceName.Str() == "" && environmentName.Str() == "") { + continue + } + + logGroupNamesSlice := strings.Split(logGroupNames.Str(), "&") + for _, logGroupName := range logGroupNamesSlice { + if logGroupName == "" { + continue + } + addToResourceStore(resourcestore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) + } + } + + 
return md, nil +} diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go new file mode 100644 index 0000000000..6a84e2049c --- /dev/null +++ b/plugins/processors/awsentity/processor_test.go @@ -0,0 +1,146 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" +) + +type mockResourceStore struct { + entries []resourceStoreEntry +} + +type resourceStoreEntry struct { + logGroupName resourcestore.LogGroupName + serviceName string + environmentName string +} + +func newMockResourceStore() *mockResourceStore { + return &mockResourceStore{ + entries: make([]resourceStoreEntry, 0), + } +} + +func newAddToMockResourceStore(rs *mockResourceStore) func(resourcestore.LogGroupName, string, string) { + return func(logGroupName resourcestore.LogGroupName, serviceName string, environmentName string) { + rs.entries = append(rs.entries, resourceStoreEntry{ + logGroupName: logGroupName, + serviceName: serviceName, + environmentName: environmentName, + }) + } +} + +func TestProcessMetrics(t *testing.T) { + logger, _ := zap.NewDevelopment() + p := newAwsEntityProcessor(logger) + ctx := context.Background() + + // empty metrics, no action + // metrics with no log group names, no action + // metrics with no service/environment, no action + // metrics with log group name and service, add to rs + // metrics with log group name and env, add to rs + // metrics with log group name and both, add to rs + // metrics with two log group names, add both + // metrics with two resourcemetrics, add both + tests := []struct { + name string + metrics pmetric.Metrics + want []resourceStoreEntry + }{ + { + name: "EmptyMetrics", + metrics: pmetric.NewMetrics(), + want: 
[]resourceStoreEntry{}, + }, + { + name: "NoLogGroupNames", + metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + want: []resourceStoreEntry{}, + }, + { + name: "NoServiceOrEnvironment", + metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group"), + want: []resourceStoreEntry{}, + }, + { + name: "LogGroupNameAndService", + metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeServiceName, "test-service"), + want: []resourceStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service"}}, + }, + { + name: "LogGroupNameAndEnvironment", + metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeDeploymentEnvironment, "test-environment"), + want: []resourceStoreEntry{{logGroupName: "test-log-group", environmentName: "test-environment"}}, + }, + { + name: "LogGroupNameAndServiceAndEnvironment", + metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + want: []resourceStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service", environmentName: "test-environment"}}, + }, + { + name: "TwoLogGroupNames", + metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group1&test-log-group2", attributeServiceName, "test-service"), + want: []resourceStoreEntry{ + {logGroupName: "test-log-group1", serviceName: "test-service"}, + {logGroupName: "test-log-group2", serviceName: "test-service"}, + }, + }, + { + name: "EmptyLogGroupNames", + metrics: generateMetrics(attributeAwsLogGroupNames, "&&test-log-group1&&test-log-group2&&", attributeServiceName, "test-service"), + want: []resourceStoreEntry{ + {logGroupName: "test-log-group1", serviceName: "test-service"}, + {logGroupName: "test-log-group2", serviceName: "test-service"}, + }, + }, + { + name: "TwoResourceMetrics", + metrics: generateMetricsWithTwoResources(), + want: 
[]resourceStoreEntry{ + {logGroupName: "test-log-group1", serviceName: "test-service1"}, + {logGroupName: "test-log-group2", serviceName: "test-service2"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rs := newMockResourceStore() + addToResourceStore = newAddToMockResourceStore(rs) + _, err := p.processMetrics(ctx, tt.metrics) + assert.NoError(t, err) + assert.Equal(t, tt.want, rs.entries) + }) + } +} + +func generateMetrics(resourceAttrs ...string) pmetric.Metrics { + md := pmetric.NewMetrics() + generateResource(md, resourceAttrs...) + return md +} + +func generateMetricsWithTwoResources() pmetric.Metrics { + md := pmetric.NewMetrics() + generateResource(md, attributeAwsLogGroupNames, "test-log-group1", attributeServiceName, "test-service1") + generateResource(md, attributeAwsLogGroupNames, "test-log-group2", attributeServiceName, "test-service2") + return md +} + +func generateResource(md pmetric.Metrics, resourceAttrs ...string) { + attrs := md.ResourceMetrics().AppendEmpty().Resource().Attributes() + for i := 0; i < len(resourceAttrs); i += 2 { + attrs.PutStr(resourceAttrs[i], resourceAttrs[i+1]) + } +} diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 27b3a1dac7..7da2d1ebca 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -31,6 +31,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/gpuattributes" ) @@ -52,6 +53,7 @@ func Factories() (otelcol.Factories, error) { if factories.Processors, err = processor.MakeFactoryMap( 
awsapplicationsignals.NewFactory(), + awsentity.NewFactory(), batchprocessor.NewFactory(), cumulativetodeltaprocessor.NewFactory(), ec2tagger.NewFactory(), diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 84e30bbdb0..ed14cd6f8a 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -12,7 +12,7 @@ import ( const ( receiversCount = 6 - processorCount = 8 + processorCount = 9 exportersCount = 6 extensionsCount = 3 ) @@ -40,19 +40,21 @@ func TestComponents(t *testing.T) { processors := factories.Processors assert.Len(t, processors, processorCount) awsapplicationsignalsType, _ := component.NewType("awsapplicationsignals") + awsentityType, _ := component.NewType("awsentity") batchType, _ := component.NewType("batch") cumulativetodeltaType, _ := component.NewType("cumulativetodelta") ec2taggerType, _ := component.NewType("ec2tagger") + gpuattributesType, _ := component.NewType("gpuattributes") metricstransformType, _ := component.NewType("metricstransform") transformType, _ := component.NewType("transform") - gpuattributesType, _ := component.NewType("gpuattributes") assert.NotNil(t, processors[awsapplicationsignalsType]) + assert.NotNil(t, processors[awsentityType]) assert.NotNil(t, processors[batchType]) assert.NotNil(t, processors[cumulativetodeltaType]) assert.NotNil(t, processors[ec2taggerType]) + assert.NotNil(t, processors[gpuattributesType]) assert.NotNil(t, processors[metricstransformType]) assert.NotNil(t, processors[transformType]) - assert.NotNil(t, processors[gpuattributesType]) exporters := factories.Exporters assert.Len(t, exporters, exportersCount) diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 59f329e7b7..796b5102bc 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ 
b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -298,6 +298,7 @@ processors: resolvers: - name: TestCluster platform: eks + awsentity: {} batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -664,6 +665,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index 443b95e046..565a405b20 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -298,6 +298,7 @@ processors: resolvers: - name: TestCluster platform: k8s + awsentity: {} batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -644,6 +645,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index ed8d715b7a..7c9ebccbf2 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -298,6 +298,7 @@ processors: resolvers: - name: TestCluster platform: eks + awsentity: {} batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -664,6 +665,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 9517705a33..4439757050 100644 --- 
a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -298,6 +298,7 @@ processors: resolvers: - name: TestCluster platform: eks + awsentity: {} batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -664,6 +665,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index 6f00e27b92..28ad9f4c28 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -157,6 +157,7 @@ extensions: profile: AmazonCloudWatchAgent shared_credential_file: fake-path processors: + awsentity: {} awsapplicationsignals: resolvers: - name: "" @@ -477,6 +478,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 17c1424f02..26b3a9d669 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -161,6 +161,7 @@ processors: resolvers: - name: "" platform: generic + awsentity: {} resourcedetection: aks: resource_attributes: @@ -477,6 +478,7 @@ service: exporters: - awsemf/application_signals processors: + - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/translate/otel/pipeline/applicationsignals/translator.go b/translator/translate/otel/pipeline/applicationsignals/translator.go index 803afbd002..da982f9a40 100644 --- 
a/translator/translate/otel/pipeline/applicationsignals/translator.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator.go @@ -15,6 +15,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/awsproxy" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsapplicationsignals" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/resourcedetection" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" ) @@ -51,6 +52,9 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators Extensions: common.NewTranslatorMap[component.Config](), } + if t.dataType == component.DataTypeMetrics { + translators.Processors.Set(awsentity.NewTranslator()) + } translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithDataType(t.dataType))) diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go index 575f726c95..47363361f9 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator_test.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -125,7 +125,7 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, @@ -142,7 +142,7 @@ func TestTranslatorMetricsForKubernetes(t 
*testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, @@ -200,7 +200,7 @@ func TestTranslatorMetricsForEC2(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go new file mode 100644 index 0000000000..cf887fdc9e --- /dev/null +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/processor" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +const name = "awsentity" + +type translator struct { + factory processor.Factory +} + +func NewTranslator() common.Translator[component.Config] { + return &translator{ + factory: awsentity.NewFactory(), + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), "") +} + +func (t *translator) Translate(_ *confmap.Conf) (component.Config, error) { + return t.factory.CreateDefaultConfig().(*awsentity.Config), nil +} From b67a45d67464c19ac6025bccd37bd7228aebaab2 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 3 Jul 2024 10:27:59 -0400 Subject: [PATCH 36/55] Refactor aws-go-sdk cloudwatchlogs API with entity model (#740) --- .../{resourcestore => entitystore}/config.go | 2 +- .../config_test.go | 2 +- .../{resourcestore => entitystore}/ec2Info.go | 2 +- .../ec2Info_test.go | 2 +- .../extension.go | 63 +++++++++-------- .../extension_test.go | 69 +++++++++---------- .../{resourcestore => entitystore}/factory.go | 14 ++-- .../factory_test.go | 2 +- .../serviceprovider.go | 2 +- .../serviceprovider_test.go | 2 +- logs/logs.go | 7 +- plugins/inputs/logfile/logfile.go | 8 +-- plugins/inputs/logfile/tailersrc.go | 8 +-- .../wineventlog/wineventlog.go | 2 +- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 8 +-- plugins/outputs/cloudwatchlogs/pusher.go | 6 +- plugins/outputs/cloudwatchlogs/pusher_test.go | 40 +++++------ plugins/processors/awsentity/processor.go | 10 +-- .../processors/awsentity/processor_test.go | 46 ++++++------- service/defaultcomponents/components.go | 4 +- .../sampleConfig/advanced_config_darwin.yaml | 4 +- 
.../sampleConfig/advanced_config_linux.yaml | 4 +- .../sampleConfig/advanced_config_windows.yaml | 4 +- .../appsignals_and_eks_config.yaml | 4 +- .../appsignals_and_k8s_config.yaml | 4 +- .../appsignals_fallback_and_eks_config.yaml | 4 +- .../appsignals_over_fallback_config.yaml | 4 +- .../sampleConfig/base_appsignals_config.yaml | 4 +- .../base_appsignals_fallback_config.yaml | 4 +- .../base_container_insights_config.yaml | 4 +- .../sampleConfig/basic_config_linux.yaml | 4 +- .../sampleConfig/basic_config_windows.yaml | 4 +- .../sampleConfig/collectd_config_linux.yaml | 4 +- .../sampleConfig/compass_linux_config.yaml | 4 +- .../sampleConfig/complete_darwin_config.yaml | 4 +- .../sampleConfig/complete_linux_config.yaml | 4 +- .../sampleConfig/complete_windows_config.yaml | 4 +- .../sampleConfig/config_with_env.yaml | 4 +- .../sampleConfig/delta_config_linux.yaml | 4 +- .../sampleConfig/delta_net_config_linux.yaml | 4 +- .../sampleConfig/drop_origin_linux.yaml | 4 +- .../emf_and_kubernetes_config.yaml | 4 +- .../emf_and_kubernetes_with_gpu_config.yaml | 4 +- .../ignore_append_dimensions.yaml | 4 +- .../sampleConfig/invalid_input_linux.yaml | 4 +- .../kubernetes_on_prem_config.yaml | 4 +- .../sampleConfig/log_ecs_metric_only.yaml | 4 +- .../tocwconfig/sampleConfig/log_filter.yaml | 4 +- .../sampleConfig/log_only_config_windows.yaml | 4 +- .../logs_and_kubernetes_config.yaml | 4 +- .../sampleConfig/no_skip_log_timestamp.yaml | 4 +- .../no_skip_log_timestamp_windows.yaml | 4 +- .../sampleConfig/prometheus_config_linux.yaml | 4 +- .../prometheus_config_windows.yaml | 4 +- .../sampleConfig/skip_log_timestamp.yaml | 4 +- .../skip_log_timestamp_default.yaml | 4 +- .../skip_log_timestamp_default_windows.yaml | 4 +- .../skip_log_timestamp_windows.yaml | 4 +- .../sampleConfig/standard_config_linux.yaml | 4 +- ...ndard_config_linux_with_common_config.yaml | 4 +- .../sampleConfig/standard_config_windows.yaml | 4 +- ...ard_config_windows_with_common_config.yaml | 4 +- 
.../sampleConfig/statsd_config_linux.yaml | 4 +- .../sampleConfig/statsd_config_windows.yaml | 4 +- .../sampleConfig/trace_config_linux.yaml | 4 +- .../sampleConfig/trace_config_windows.yaml | 4 +- .../windows_eventlog_only_config.yaml | 4 +- .../translator.go | 8 +-- .../translator_test.go | 12 ++-- translator/translate/otel/translate_otel.go | 4 +- 70 files changed, 248 insertions(+), 263 deletions(-) rename extension/{resourcestore => entitystore}/config.go (95%) rename extension/{resourcestore => entitystore}/config_test.go (95%) rename extension/{resourcestore => entitystore}/ec2Info.go (99%) rename extension/{resourcestore => entitystore}/ec2Info_test.go (99%) rename extension/{resourcestore => entitystore}/extension.go (74%) rename extension/{resourcestore => entitystore}/extension_test.go (83%) rename extension/{resourcestore => entitystore}/factory.go (73%) rename extension/{resourcestore => entitystore}/factory_test.go (96%) rename extension/{resourcestore => entitystore}/serviceprovider.go (99%) rename extension/{resourcestore => entitystore}/serviceprovider_test.go (99%) rename translator/translate/otel/extension/{resourcestore => entitystore}/translator.go (84%) rename translator/translate/otel/extension/{resourcestore => entitystore}/translator_test.go (87%) diff --git a/extension/resourcestore/config.go b/extension/entitystore/config.go similarity index 95% rename from extension/resourcestore/config.go rename to extension/entitystore/config.go index 84a54c37d2..f30d8cfbd1 100644 --- a/extension/resourcestore/config.go +++ b/extension/entitystore/config.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "go.opentelemetry.io/collector/component" diff --git a/extension/resourcestore/config_test.go b/extension/entitystore/config_test.go similarity index 95% rename from extension/resourcestore/config_test.go rename to extension/entitystore/config_test.go index cf30af680d..fc0576330f 100644 --- a/extension/resourcestore/config_test.go +++ b/extension/entitystore/config_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "testing" diff --git a/extension/resourcestore/ec2Info.go b/extension/entitystore/ec2Info.go similarity index 99% rename from extension/resourcestore/ec2Info.go rename to extension/entitystore/ec2Info.go index cdf46c8e94..1b2ec011b9 100644 --- a/extension/resourcestore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" diff --git a/extension/resourcestore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go similarity index 99% rename from extension/resourcestore/ec2Info_test.go rename to extension/entitystore/ec2Info_test.go index 5e1b694808..ef09ccb4bf 100644 --- a/extension/resourcestore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "strings" diff --git a/extension/resourcestore/extension.go b/extension/entitystore/extension.go similarity index 74% rename from extension/resourcestore/extension.go rename to extension/entitystore/extension.go index 96277654e3..c252681e59 100644 --- a/extension/resourcestore/extension.go +++ b/extension/entitystore/extension.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" @@ -30,6 +30,9 @@ const ( ServiceNameSourceKey = "AWS.ServiceNameSource" PlatformType = "PlatformType" EC2PlatForm = "AWS::EC2" + Type = "Type" + Name = "Name" + Environment = "Environment" ) type ec2ProviderType func(string, *configaws.CredentialConfig) ec2iface.EC2API @@ -45,7 +48,7 @@ type eksInfo struct { ClusterName string } -type ResourceStore struct { +type EntityStore struct { logger *zap.Logger config *Config done chan struct{} @@ -61,7 +64,7 @@ type ResourceStore struct { eksInfo eksInfo // serviceprovider stores information about possible service names - // that we can attach to the resource ID + // that we can attach to the entity serviceprovider serviceProviderInterface // nativeCredential stores the credential config for agent's native @@ -73,9 +76,9 @@ type ResourceStore struct { stsClient stsiface.STSAPI } -var _ extension.Extension = (*ResourceStore)(nil) +var _ extension.Extension = (*EntityStore)(nil) -func (r *ResourceStore) Start(ctx context.Context, host component.Host) error { +func (r *EntityStore) Start(ctx context.Context, host component.Host) error { // Get IMDS client and EC2 API client which requires region for authentication // These will be passed down to any object that requires access to IMDS or EC2 // API client so we have single source of truth for credential @@ -96,34 +99,34 @@ func (r *ResourceStore) Start(ctx context.Context, host 
component.Host) error { return nil } -func (r *ResourceStore) Shutdown(_ context.Context) error { +func (r *EntityStore) Shutdown(_ context.Context) error { close(r.done) return nil } -func (r *ResourceStore) Mode() string { +func (r *EntityStore) Mode() string { return r.mode } -func (r *ResourceStore) EKSInfo() eksInfo { +func (r *EntityStore) EKSInfo() eksInfo { return r.eksInfo } -func (r *ResourceStore) EC2Info() ec2Info { +func (r *EntityStore) EC2Info() ec2Info { return r.ec2Info } -func (r *ResourceStore) SetNativeCredential(client client.ConfigProvider) { +func (r *EntityStore) SetNativeCredential(client client.ConfigProvider) { r.nativeCredential = client } -func (r *ResourceStore) NativeCredentialExists() bool { +func (r *EntityStore) NativeCredentialExists() bool { return r.nativeCredential != nil } -// CreateLogFileRID creates the RID for log events that are being uploaded from a log file in the environment. -func (r *ResourceStore) CreateLogFileRID(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Resource { - if !r.shouldReturnRID() { +// CreateLogFileEntity creates the entity for log events that are being uploaded from a log file in the environment. 
+func (r *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Entity { + if !r.shouldReturnEntity() { return nil } @@ -133,14 +136,14 @@ func (r *ResourceStore) CreateLogFileRID(logFileGlob LogFileGlob, logGroupName L attributeMap := r.createAttributeMap() addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) - return &cloudwatchlogs.Resource{ + return &cloudwatchlogs.Entity{ KeyAttributes: keyAttributes, - AttributeMaps: []map[string]*string{attributeMap}, + Attributes: attributeMap, } } -// AddServiceAttrEntryForLogFile adds an entry to the resource store for the provided file glob -> (serviceName, environmentName) key-value pair -func (r *ResourceStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { +// AddServiceAttrEntryForLogFile adds an entry to the entity store for the provided file glob -> (serviceName, environmentName) key-value pair +func (r *EntityStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { if r.serviceprovider != nil { r.serviceprovider.addEntryForLogFile(fileGlob, ServiceAttribute{ ServiceName: serviceName, @@ -150,8 +153,8 @@ func (r *ResourceStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serv } } -// AddServiceAttrEntryForLogGroup adds an entry to the resource store for the provided log group nme -> (serviceName, environmentName) key-value pair -func (r *ResourceStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, serviceName string, environmentName string) { +// AddServiceAttrEntryForLogGroup adds an entry to the entity store for the provided log group name -> (serviceName, environmentName) key-value pair +func (r *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, serviceName string, environmentName string) { r.serviceprovider.addEntryForLogGroup(logGroupName, ServiceAttribute{ ServiceName: serviceName,
ServiceNameSource: ServiceNameSourceInstrumentation, @@ -159,7 +162,7 @@ func (r *ResourceStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName }) } -func (r *ResourceStore) createAttributeMap() map[string]*string { +func (r *EntityStore) createAttributeMap() map[string]*string { attributeMap := make(map[string]*string) if r.mode == config.ModeEC2 { @@ -173,23 +176,19 @@ func (r *ResourceStore) createAttributeMap() map[string]*string { return attributeMap } -// createServiceKeyAttribute creates KeyAttributes for Service resources -func (r *ResourceStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) *cloudwatchlogs.KeyAttributes { - serviceKeyAttr := &cloudwatchlogs.KeyAttributes{ +// createServiceKeyAttribute creates KeyAttributes for Service entities +func (r *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) map[string]*string { + serviceKeyAttr := map[string]*string{ Type: aws.String(Service), } - if serviceAttr.ServiceName != "" { - serviceKeyAttr.SetName(serviceAttr.ServiceName) - } - if serviceAttr.Environment != "" { - serviceKeyAttr.SetEnvironment(serviceAttr.Environment) - } + addNonEmptyToMap(serviceKeyAttr, Name, serviceAttr.ServiceName) + addNonEmptyToMap(serviceKeyAttr, Environment, serviceAttr.Environment) return serviceKeyAttr } -// shouldReturnRID checks if the account ID for the instance is +// shouldReturnEntity checks if the account ID for the instance is // matching the account ID when assuming role for the current credential. 
-func (r *ResourceStore) shouldReturnRID() bool { +func (r *EntityStore) shouldReturnEntity() bool { if r.nativeCredential == nil || r.metadataprovider == nil { r.logger.Debug("there is no credential stored for cross-account checks") return false diff --git a/extension/resourcestore/extension_test.go b/extension/entitystore/extension_test.go similarity index 83% rename from extension/resourcestore/extension_test.go rename to extension/entitystore/extension_test.go index 99bc920dd7..d96a5a91ca 100644 --- a/extension/resourcestore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" @@ -92,7 +92,7 @@ func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey stri return m.TagValue, nil } -func TestResourceStore_EC2Info(t *testing.T) { +func TestEntityStore_EC2Info(t *testing.T) { tests := []struct { name string ec2InfoInput ec2Info @@ -112,7 +112,7 @@ func TestResourceStore_EC2Info(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ + r := &EntityStore{ ec2Info: tt.ec2InfoInput, } if got := r.EC2Info(); !reflect.DeepEqual(got, tt.want) { @@ -122,7 +122,7 @@ func TestResourceStore_EC2Info(t *testing.T) { } } -func TestResourceStore_Mode(t *testing.T) { +func TestEntityStore_Mode(t *testing.T) { tests := []struct { name string modeInput string @@ -132,7 +132,7 @@ func TestResourceStore_Mode(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ + r := &EntityStore{ mode: tt.modeInput, } if got := r.Mode(); got != tt.want { @@ -166,7 +166,7 @@ func Test_getRegion(t *testing.T) { } } -func TestResourceStore_createAttributeMaps(t *testing.T) { +func TestEntityStore_createAttributeMaps(t *testing.T) { type fields struct { ec2Info ec2Info mode string @@ -227,7 +227,7 @@ 
func TestResourceStore_createAttributeMaps(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ + r := &EntityStore{ ec2Info: tt.fields.ec2Info, mode: tt.fields.mode, } @@ -236,16 +236,16 @@ func TestResourceStore_createAttributeMaps(t *testing.T) { } } -func TestResourceStore_createServiceKeyAttributes(t *testing.T) { +func TestEntityStore_createServiceKeyAttributes(t *testing.T) { tests := []struct { name string serviceAttr ServiceAttribute - want *cloudwatchlogs.KeyAttributes + want map[string]*string }{ { name: "NameAndEnvironmentSet", serviceAttr: ServiceAttribute{ServiceName: "test-service", Environment: "test-environment"}, - want: &cloudwatchlogs.KeyAttributes{ + want: map[string]*string{ Environment: aws.String("test-environment"), Name: aws.String("test-service"), Type: aws.String(Service), @@ -254,7 +254,7 @@ func TestResourceStore_createServiceKeyAttributes(t *testing.T) { { name: "OnlyNameSet", serviceAttr: ServiceAttribute{ServiceName: "test-service"}, - want: &cloudwatchlogs.KeyAttributes{ + want: map[string]*string{ Name: aws.String("test-service"), Type: aws.String(Service), }, @@ -262,7 +262,7 @@ func TestResourceStore_createServiceKeyAttributes(t *testing.T) { { name: "OnlyEnvironmentSet", serviceAttr: ServiceAttribute{Environment: "test-environment"}, - want: &cloudwatchlogs.KeyAttributes{ + want: map[string]*string{ Environment: aws.String("test-environment"), Type: aws.String(Service), }, @@ -270,13 +270,13 @@ func TestResourceStore_createServiceKeyAttributes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{} - assert.Equalf(t, tt.want, r.createServiceKeyAttributes(tt.serviceAttr), "createServiceKeyAttributes()") + r := &EntityStore{} + assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createServiceKeyAttributes(tt.serviceAttr)), "createServiceKeyAttributes()") }) } } -func TestResourceStore_createLogFileRID(t *testing.T) { +func 
TestEntityStore_createLogFileRID(t *testing.T) { instanceId := "i-abcd1234" accountId := "123456789012" glob := LogFileGlob("glob") @@ -288,7 +288,7 @@ func TestResourceStore_createLogFileRID(t *testing.T) { } sp := new(mockServiceProvider) sp.On("logFileServiceAttribute", glob, group).Return(serviceAttr) - rs := ResourceStore{ + rs := EntityStore{ mode: config.ModeEC2, ec2Info: ec2Info{InstanceID: instanceId}, serviceprovider: sp, @@ -297,30 +297,25 @@ func TestResourceStore_createLogFileRID(t *testing.T) { nativeCredential: &session.Session{}, } - resource := rs.CreateLogFileRID(glob, group) + entity := rs.CreateLogFileEntity(glob, group) - expectedResource := cloudwatchlogs.Resource{ - KeyAttributes: &cloudwatchlogs.KeyAttributes{ + expectedEntity := cloudwatchlogs.Entity{ + KeyAttributes: map[string]*string{ Environment: aws.String("test-environment"), Name: aws.String("test-service"), Type: aws.String(Service), }, - AttributeMaps: []map[string]*string{ - { - InstanceIDKey: aws.String(instanceId), - ServiceNameSourceKey: aws.String(ServiceNameSourceUserConfiguration), - PlatformType: aws.String(EC2PlatForm), - }, + Attributes: map[string]*string{ + InstanceIDKey: aws.String(instanceId), + ServiceNameSourceKey: aws.String(ServiceNameSourceUserConfiguration), + PlatformType: aws.String(EC2PlatForm), }, } - assert.Equal(t, *expectedResource.KeyAttributes.Environment, *resource.KeyAttributes.Environment) - assert.Equal(t, *expectedResource.KeyAttributes.Name, *resource.KeyAttributes.Name) - assert.Equal(t, *expectedResource.KeyAttributes.Type, *resource.KeyAttributes.Type) - assert.Len(t, resource.AttributeMaps, 1) - assert.Equal(t, dereferenceMap(expectedResource.AttributeMaps[0]), dereferenceMap(resource.AttributeMaps[0])) + assert.Equal(t, dereferenceMap(expectedEntity.KeyAttributes), dereferenceMap(entity.KeyAttributes)) + assert.Equal(t, dereferenceMap(expectedEntity.Attributes), dereferenceMap(entity.Attributes)) } -func TestResourceStore_shouldReturnRID(t 
*testing.T) { +func TestEntityStore_shouldReturnRID(t *testing.T) { type fields struct { metadataprovider ec2metadataprovider.MetadataProvider stsClient stsiface.STSAPI @@ -353,12 +348,12 @@ func TestResourceStore_shouldReturnRID(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &ResourceStore{ + r := &EntityStore{ metadataprovider: tt.fields.metadataprovider, stsClient: tt.fields.stsClient, nativeCredential: tt.fields.nativeCredential, } - assert.Equalf(t, tt.want, r.shouldReturnRID(), "shouldReturnRID()") + assert.Equalf(t, tt.want, r.shouldReturnEntity(), "shouldReturnEntity()") }) } } @@ -375,9 +370,9 @@ func dereferenceMap(input map[string]*string) map[string]string { return result } -func TestResourceStore_addServiceAttrEntryForLogFile(t *testing.T) { +func TestEntityStore_addServiceAttrEntryForLogFile(t *testing.T) { sp := new(mockServiceProvider) - rs := ResourceStore{serviceprovider: sp} + rs := EntityStore{serviceprovider: sp} key := LogFileGlob("/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log") serviceAttr := ServiceAttribute{ @@ -391,9 +386,9 @@ func TestResourceStore_addServiceAttrEntryForLogFile(t *testing.T) { sp.AssertExpectations(t) } -func TestResourceStore_addServiceAttrEntryForLogGroup(t *testing.T) { +func TestEntityStore_addServiceAttrEntryForLogGroup(t *testing.T) { sp := new(mockServiceProvider) - rs := ResourceStore{serviceprovider: sp} + rs := EntityStore{serviceprovider: sp} key := LogGroupName("TestLogGroup") serviceAttr := ServiceAttribute{ diff --git a/extension/resourcestore/factory.go b/extension/entitystore/factory.go similarity index 73% rename from extension/resourcestore/factory.go rename to extension/entitystore/factory.go index 98604cdb1e..10e2e20913 100644 --- a/extension/resourcestore/factory.go +++ b/extension/entitystore/factory.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" @@ -11,12 +11,12 @@ import ( ) var ( - TypeStr, _ = component.NewType("resourcestore") - resourceStore *ResourceStore + TypeStr, _ = component.NewType("entitystore") + entityStore *EntityStore ) -func GetResourceStore() *ResourceStore { - return resourceStore +func GetEntityStore() *EntityStore { + return entityStore } func NewFactory() extension.Factory { @@ -33,9 +33,9 @@ func createDefaultConfig() component.Config { } func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { - resourceStore = &ResourceStore{ + entityStore = &EntityStore{ logger: settings.Logger, config: cfg.(*Config), } - return resourceStore, nil + return entityStore, nil } diff --git a/extension/resourcestore/factory_test.go b/extension/entitystore/factory_test.go similarity index 96% rename from extension/resourcestore/factory_test.go rename to extension/entitystore/factory_test.go index 160dc7fb54..f0bd0305b5 100644 --- a/extension/resourcestore/factory_test.go +++ b/extension/entitystore/factory_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" diff --git a/extension/resourcestore/serviceprovider.go b/extension/entitystore/serviceprovider.go similarity index 99% rename from extension/resourcestore/serviceprovider.go rename to extension/entitystore/serviceprovider.go index 01918aa9e6..254a241607 100644 --- a/extension/resourcestore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "context" diff --git a/extension/resourcestore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go similarity index 99% rename from extension/resourcestore/serviceprovider_test.go rename to extension/entitystore/serviceprovider_test.go index b40180c811..678b9d15f6 100644 --- a/extension/resourcestore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "testing" diff --git a/logs/logs.go b/logs/logs.go index fcdd031269..913377b610 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -40,7 +40,7 @@ type LogSrc interface { Description() string Retention() int Class() string - ResourceID() *cloudwatchlogs.Resource + Entity() *cloudwatchlogs.Entity Stop() } @@ -74,11 +74,6 @@ func NewLogAgent(c *config.Config) *LogAgent { } } -type ResourceID struct { - KeyAttributes map[string]string - AttributeMap map[string]string -} - // Run LogAgent will scan all input and output plugins for LogCollection and LogBackend. 
// And connect all the LogSrc from the LogCollection found to the respective LogDest // based on the configured "destination", and "name" diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index e4109da705..fde171135e 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/internal/logscommon" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/globpath" @@ -153,15 +153,15 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { t.cleanUpStoppedTailerSrc() - rs := resourcestore.GetResourceStore() + rs := entitystore.GetEntityStore() // Create a "tailer" for each file for i := range t.FileConfig { fileconfig := &t.FileConfig[i] - //Add file -> {serviceName, deploymentEnvironment} mapping to resource store + //Add file -> {serviceName, deploymentEnvironment} mapping to entity store if rs != nil { - rs.AddServiceAttrEntryForLogFile(resourcestore.LogFileGlob(fileconfig.FilePath), fileconfig.ServiceName, fileconfig.Environment) + rs.AddServiceAttrEntryForLogFile(entitystore.LogFileGlob(fileconfig.FilePath), fileconfig.ServiceName, fileconfig.Environment) } targetFiles, err := t.getTargetFiles(fileconfig) diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 8a3d566aba..6e9c58c7fb 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/text/encoding" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" 
"github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" ) @@ -170,10 +170,10 @@ func (ts *tailerSrc) AddCleanUpFn(f func()) { ts.cleanUpFns = append(ts.cleanUpFns, f) } -func (ts *tailerSrc) ResourceID() *cloudwatchlogs.Resource { - rs := resourcestore.GetResourceStore() +func (ts *tailerSrc) Entity() *cloudwatchlogs.Entity { + rs := entitystore.GetEntityStore() if rs != nil { - return rs.CreateLogFileRID(resourcestore.LogFileGlob(ts.fileGlobPath), resourcestore.LogGroupName(ts.group)) + return rs.CreateLogFileEntity(entitystore.LogFileGlob(ts.fileGlobPath), entitystore.LogGroupName(ts.group)) } return nil } diff --git a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go index 0fa149590d..4d6f513218 100644 --- a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go +++ b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go @@ -111,7 +111,7 @@ func (w *windowsEventLog) Stop() { close(w.done) } -func (w *windowsEventLog) ResourceID() *cloudwatchlogs.Resource { +func (w *windowsEventLog) Entity() *cloudwatchlogs.Entity { return nil } diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 222acb7d3f..2dc8bb3a46 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -24,7 +24,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" @@ 
-135,9 +135,9 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { Filename: c.Filename, Token: c.Token, } - resourcestore := resourcestore.GetResourceStore() - if resourcestore != nil && !resourcestore.NativeCredentialExists() { - resourcestore.SetNativeCredential(credentialConfig.Credentials()) + entitystore := entitystore.GetEntityStore() + if entitystore != nil && !entitystore.NativeCredentialExists() { + entitystore.SetNativeCredential(credentialConfig.Credentials()) } if cwd, ok := c.cwDests[t]; ok { return cwd diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index a39f5769f9..33d3ba052e 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -220,9 +220,9 @@ func (p *pusher) send() { if p.needSort { sort.Stable(ByTimestamp(p.events)) } - var resourceID *cloudwatchlogs.Resource + var entity *cloudwatchlogs.Entity if p.logSrc != nil { - resourceID = p.logSrc.ResourceID() + entity = p.logSrc.Entity() } input := &cloudwatchlogs.PutLogEventsInput{ @@ -230,7 +230,7 @@ func (p *pusher) send() { LogGroupName: &p.Group, LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, - Resource: resourceID, + Entity: entity, } startTime := time.Now() diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index 726a32ee79..cf5ccc166a 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -29,18 +29,16 @@ type mockLogSrc struct { logs.LogSrc } -func (m *mockLogSrc) ResourceID() *cloudwatchlogs.Resource { - return &cloudwatchlogs.Resource{ - AttributeMaps: []map[string]*string{ - { - "PlatformType": aws.String("AWS::EC2"), - "EC2.InstanceId": aws.String("i-123456789"), - "EC2.AutoScalingGroup": aws.String("test-group"), - }, +func (m *mockLogSrc) Entity() *cloudwatchlogs.Entity { + return &cloudwatchlogs.Entity{ + Attributes: map[string]*string{ + 
"PlatformType": aws.String("AWS::EC2"), + "EC2.InstanceId": aws.String("i-123456789"), + "EC2.AutoScalingGroup": aws.String("test-group"), }, - KeyAttributes: &cloudwatchlogs.KeyAttributes{ - Name: aws.String("myService"), - Environment: aws.String("myEnvironment"), + KeyAttributes: map[string]*string{ + "Name": aws.String("myService"), + "Environment": aws.String("myEnvironment"), }, } } @@ -109,17 +107,15 @@ func TestAddSingleEvent(t *testing.T) { var s svcMock called := false nst := "NEXT_SEQ_TOKEN" - expectedResourceID := &cloudwatchlogs.Resource{ - AttributeMaps: []map[string]*string{ - { - "PlatformType": aws.String("AWS::EC2"), - "EC2.InstanceId": aws.String("i-123456789"), - "EC2.AutoScalingGroup": aws.String("test-group"), - }, + expectedEntity := &cloudwatchlogs.Entity{ + Attributes: map[string]*string{ + "PlatformType": aws.String("AWS::EC2"), + "EC2.InstanceId": aws.String("i-123456789"), + "EC2.AutoScalingGroup": aws.String("test-group"), }, - KeyAttributes: &cloudwatchlogs.KeyAttributes{ - Name: aws.String("myService"), - Environment: aws.String("myEnvironment"), + KeyAttributes: map[string]*string{ + "Name": aws.String("myService"), + "Environment": aws.String("myEnvironment"), }, } @@ -137,7 +133,7 @@ func TestAddSingleEvent(t *testing.T) { if len(in.LogEvents) != 1 || *in.LogEvents[0].Message != "MSG" { t.Errorf("PutLogEvents called with incorrect message, got: '%v'", *in.LogEvents[0].Message) } - require.Equal(t, expectedResourceID, in.Resource) + require.Equal(t, expectedEntity, in.Entity) return &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: &nst, }, nil diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index aa9f30b262..2f166962bd 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + 
"github.com/aws/amazon-cloudwatch-agent/extension/entitystore" ) const ( @@ -20,8 +20,8 @@ const ( ) // exposed as a variable for unit testing -var addToResourceStore = func(logGroupName resourcestore.LogGroupName, serviceName string, environmentName string) { - rs := resourcestore.GetResourceStore() +var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName string, environmentName string) { + rs := entitystore.GetEntityStore() if rs == nil { return } @@ -30,7 +30,7 @@ var addToResourceStore = func(logGroupName resourcestore.LogGroupName, serviceNa // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or // deployment.environment resource attributes set, then adds the association between the log group(s) and the -// service/environment names to the resourcestore extension. +// service/environment names to the entitystore extension. type awsEntityProcessor struct { logger *zap.Logger } @@ -58,7 +58,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric if logGroupName == "" { continue } - addToResourceStore(resourcestore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) + addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) } } diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 6a84e2049c..d785352a23 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -11,28 +11,28 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" ) -type mockResourceStore struct { - entries []resourceStoreEntry +type mockEntityStore struct { + entries []entityStoreEntry } -type resourceStoreEntry struct { - logGroupName resourcestore.LogGroupName +type 
entityStoreEntry struct { + logGroupName entitystore.LogGroupName serviceName string environmentName string } -func newMockResourceStore() *mockResourceStore { - return &mockResourceStore{ - entries: make([]resourceStoreEntry, 0), +func newMockEntityStore() *mockEntityStore { + return &mockEntityStore{ + entries: make([]entityStoreEntry, 0), } } -func newAddToMockResourceStore(rs *mockResourceStore) func(resourcestore.LogGroupName, string, string) { - return func(logGroupName resourcestore.LogGroupName, serviceName string, environmentName string) { - rs.entries = append(rs.entries, resourceStoreEntry{ +func newAddToMockEntityStore(rs *mockEntityStore) func(entitystore.LogGroupName, string, string) { + return func(logGroupName entitystore.LogGroupName, serviceName string, environmentName string) { + rs.entries = append(rs.entries, entityStoreEntry{ logGroupName: logGroupName, serviceName: serviceName, environmentName: environmentName, @@ -56,42 +56,42 @@ func TestProcessMetrics(t *testing.T) { tests := []struct { name string metrics pmetric.Metrics - want []resourceStoreEntry + want []entityStoreEntry }{ { name: "EmptyMetrics", metrics: pmetric.NewMetrics(), - want: []resourceStoreEntry{}, + want: []entityStoreEntry{}, }, { name: "NoLogGroupNames", metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), - want: []resourceStoreEntry{}, + want: []entityStoreEntry{}, }, { name: "NoServiceOrEnvironment", metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group"), - want: []resourceStoreEntry{}, + want: []entityStoreEntry{}, }, { name: "LogGroupNameAndService", metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeServiceName, "test-service"), - want: []resourceStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service"}}, + want: []entityStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service"}}, }, { name: "LogGroupNameAndEnvironment", metrics: 
generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeDeploymentEnvironment, "test-environment"), - want: []resourceStoreEntry{{logGroupName: "test-log-group", environmentName: "test-environment"}}, + want: []entityStoreEntry{{logGroupName: "test-log-group", environmentName: "test-environment"}}, }, { name: "LogGroupNameAndServiceAndEnvironment", metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group", attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), - want: []resourceStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service", environmentName: "test-environment"}}, + want: []entityStoreEntry{{logGroupName: "test-log-group", serviceName: "test-service", environmentName: "test-environment"}}, }, { name: "TwoLogGroupNames", metrics: generateMetrics(attributeAwsLogGroupNames, "test-log-group1&test-log-group2", attributeServiceName, "test-service"), - want: []resourceStoreEntry{ + want: []entityStoreEntry{ {logGroupName: "test-log-group1", serviceName: "test-service"}, {logGroupName: "test-log-group2", serviceName: "test-service"}, }, @@ -99,7 +99,7 @@ func TestProcessMetrics(t *testing.T) { { name: "EmptyLogGroupNames", metrics: generateMetrics(attributeAwsLogGroupNames, "&&test-log-group1&&test-log-group2&&", attributeServiceName, "test-service"), - want: []resourceStoreEntry{ + want: []entityStoreEntry{ {logGroupName: "test-log-group1", serviceName: "test-service"}, {logGroupName: "test-log-group2", serviceName: "test-service"}, }, @@ -107,7 +107,7 @@ func TestProcessMetrics(t *testing.T) { { name: "TwoResourceMetrics", metrics: generateMetricsWithTwoResources(), - want: []resourceStoreEntry{ + want: []entityStoreEntry{ {logGroupName: "test-log-group1", serviceName: "test-service1"}, {logGroupName: "test-log-group2", serviceName: "test-service2"}, }, @@ -116,8 +116,8 @@ func TestProcessMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - rs := 
newMockResourceStore() - addToResourceStore = newAddToMockResourceStore(rs) + rs := newMockEntityStore() + addToEntityStore = newAddToMockEntityStore(rs) _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) assert.Equal(t, tt.want, rs.entries) diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 7da2d1ebca..e60edd0528 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/receiver/otlpreceiver" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" @@ -79,7 +79,7 @@ func Factories() (otelcol.Factories, error) { if factories.Extensions, err = extension.MakeFactoryMap( agenthealth.NewFactory(), awsproxy.NewFactory(), - resourcestore.NewFactory(), + entitystore.NewFactory(), ); err != nil { return otelcol.Factories{}, err } diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 1f4fffc354..7f1c36479a 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: @@ -67,7 +67,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml 
b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index 54f65b8822..3ea95f6fc6 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: @@ -75,7 +75,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 4d89a5f22c..639875086e 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -68,7 +68,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 796b5102bc..ebb4a066e2 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -285,7 +285,7 @@ extensions: region: us-east-1 service_name: "" role_arn: "" - resourcestore: + entitystore: mode: ec2 processors: awsapplicationsignals: @@ -659,7 +659,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index 565a405b20..d4e8665f65 100644 --- 
a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -285,7 +285,7 @@ extensions: region: us-east-1 service_name: "" role_arn: "" - resourcestore: + entitystore: mode: ec2 processors: awsapplicationsignals: @@ -639,7 +639,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 7c9ebccbf2..144f00571d 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -285,7 +285,7 @@ extensions: region: us-east-1 service_name: "" role_arn: "" - resourcestore: + entitystore: mode: ec2 processors: awsapplicationsignals: @@ -659,7 +659,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 4439757050..00934619ab 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -285,7 +285,7 @@ extensions: region: us-east-1 service_name: "" role_arn: "" - resourcestore: + entitystore: mode: ec2 processors: awsapplicationsignals: @@ -659,7 +659,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml 
b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index 28ad9f4c28..05e6e0f058 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -152,7 +152,7 @@ extensions: service_name: "" shared_credentials_file: - fake-path - resourcestore: + entitystore: mode: onPremise profile: AmazonCloudWatchAgent shared_credential_file: fake-path @@ -472,7 +472,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 26b3a9d669..97738da8a1 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -152,7 +152,7 @@ extensions: service_name: "" shared_credentials_file: - fake-path - resourcestore: + entitystore: mode: onPremise profile: AmazonCloudWatchAgent shared_credential_file: fake-path @@ -472,7 +472,7 @@ service: - awsproxy/application_signals - agenthealth/traces - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index bbf0a44a36..c0946df460 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -148,7 +148,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/containerinsights: @@ -216,7 +216,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: 
diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 9d368bc4e4..244af9bf2d 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -41,7 +41,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 105a798799..bec9fcb66d 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -43,7 +43,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 4a0453ee2b..f50ea024d7 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 receivers: telegraf_socket_listener: @@ -27,7 +27,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 3f88407d97..04c49be2c6 100644 --- 
a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: ec2 receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 2dc0671c6b..b236428ee4 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -91,7 +91,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/emf_logs: @@ -249,7 +249,7 @@ service: - agenthealth/metrics - agenthealth/logs - agenthealth/traces - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index 0485c6e8e7..78883abadb 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -94,7 +94,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/emf_logs: @@ -252,7 +252,7 @@ service: - agenthealth/metrics - agenthealth/logs - agenthealth/traces - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index acdd5570c0..791f6f3186 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -91,7 +91,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 
processors: batch/emf_logs: @@ -236,7 +236,7 @@ service: - agenthealth/metrics - agenthealth/logs - agenthealth/traces - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index e24d38b086..5bb686ff14 100644 --- a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -39,7 +39,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/emf_logs: @@ -77,7 +77,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 463908b790..850a3af086 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: @@ -60,7 +60,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/hostDeltaMetrics: exporters: diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index 0a640aa79f..e375628c32 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: @@ -44,7 +44,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/hostDeltaMetrics: exporters: 
diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 8edf76f397..b9cec6a88a 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -22,7 +22,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -60,7 +60,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 47da803a54..b5e429b5e4 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -392,7 +392,7 @@ extensions: usage_flags: mode: OP region_type: ACJ - resourcestore: + entitystore: mode: onPremise profile: default shared_credential_file: /root/.aws/credentials @@ -474,7 +474,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml index 9e481623ff..6dcce4c98a 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml @@ -657,7 +657,7 @@ extensions: usage_flags: mode: OP region_type: ACJ - resourcestore: + entitystore: mode: onPremise profile: default shared_credential_file: /root/.aws/credentials @@ -1153,7 +1153,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml 
b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index 6f2a28fc3a..086a3d45e5 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -35,7 +35,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index 9d368bc4e4..244af9bf2d 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -41,7 +41,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 7a60beba8a..30d961e8a9 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -359,7 +359,7 @@ extensions: usage_flags: mode: OP region_type: ACJ - resourcestore: + entitystore: mode: onPremise profile: AmazonCloudWatchAgent shared_credential_file: fake-path @@ -410,7 +410,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/containerinsights: exporters: diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 99542362b6..1c41bc2294 100644 --- 
a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -97,7 +97,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/containerinsights: @@ -165,7 +165,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/sampleConfig/log_filter.yaml b/translator/tocwconfig/sampleConfig/log_filter.yaml index a07aa2b23a..53768311a0 100644 --- a/translator/tocwconfig/sampleConfig/log_filter.yaml +++ b/translator/tocwconfig/sampleConfig/log_filter.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml index 701f8b7752..f6d9c2ba5a 100644 --- a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index fa7a4b110b..09f9186f1c 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -388,7 +388,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: @@ -467,7 +467,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: logs/emf_logs: 
exporters: diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml index a07aa2b23a..53768311a0 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml index 701f8b7752..f6d9c2ba5a 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 3ec0d8cb2e..cfd3398d31 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -77,7 +77,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/prometheus: @@ -93,7 +93,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/prometheus: exporters: diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index b648080e28..9d5f047c00 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ 
b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -59,7 +59,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: batch/prometheus: @@ -75,7 +75,7 @@ receivers: service: extensions: - agenthealth/logs - - resourcestore + - entitystore pipelines: metrics/prometheus: exporters: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml index 44c0e9ac18..b4ed735ac8 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml index a07aa2b23a..53768311a0 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml index 701f8b7752..f6d9c2ba5a 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git 
a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml index 088a6f3e4e..9c812a85fa 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index 9bdb6e11cc..a9a55aa6f3 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: cumulativetodelta/hostDeltaMetrics: @@ -62,7 +62,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 4e35dbcc1b..95bf544afd 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -19,7 +19,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 profile: AmazonCloudWatchAgent shared_credential_file: fake-path @@ -69,7 +69,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 
3077937363..213c8a1066 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 processors: ec2tagger: @@ -57,7 +57,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 59806ca520..f310dce431 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -19,7 +19,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 profile: AmazonCloudWatchAgent shared_credential_file: fake-path @@ -64,7 +64,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 202006271f..7ec240c0d6 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 receivers: telegraf_statsd: @@ -27,7 +27,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index ca92d50f80..224508896d 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ 
b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -17,7 +17,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 receivers: telegraf_statsd: @@ -27,7 +27,7 @@ receivers: service: extensions: - agenthealth/metrics - - resourcestore + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index 5d05eec794..b284afb1da 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -29,7 +29,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 profile: default shared_credential_file: /root/.aws/credentials @@ -82,7 +82,7 @@ receivers: service: extensions: - agenthealth/traces - - resourcestore + - entitystore pipelines: traces/xray: exporters: diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index cc8ab5d475..56813bb925 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -29,7 +29,7 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - resourcestore: + entitystore: mode: ec2 profile: default shared_credential_file: /root/.aws/credentials @@ -82,7 +82,7 @@ receivers: service: extensions: - agenthealth/traces - - resourcestore + - entitystore pipelines: traces/xray: exporters: diff --git a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml index 0a5a41b9ec..cab0687041 100644 --- a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml +++ b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml @@ -1,13 +1,13 @@ exporters: nop: {} extensions: - resourcestore: + 
entitystore: mode: "ec2" receivers: nop: {} service: extensions: - - resourcestore + - entitystore pipelines: metrics/nop: exporters: diff --git a/translator/translate/otel/extension/resourcestore/translator.go b/translator/translate/otel/extension/entitystore/translator.go similarity index 84% rename from translator/translate/otel/extension/resourcestore/translator.go rename to translator/translate/otel/extension/entitystore/translator.go index 9367383b26..7558db78cb 100644 --- a/translator/translate/otel/extension/resourcestore/translator.go +++ b/translator/translate/otel/extension/entitystore/translator.go @@ -1,14 +1,14 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -23,7 +23,7 @@ var _ common.Translator[component.Config] = (*translator)(nil) func NewTranslator() common.Translator[component.Config] { return &translator{ - factory: resourcestore.NewFactory(), + factory: entitystore.NewFactory(), } } @@ -33,7 +33,7 @@ func (t *translator) ID() component.ID { // Translate creates an extension configuration. 
func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { - cfg := t.factory.CreateDefaultConfig().(*resourcestore.Config) + cfg := t.factory.CreateDefaultConfig().(*entitystore.Config) cfg.Mode = context.CurrentContext().Mode() credentials := confmap.NewFromStringMap(agent.Global_Config.Credentials) _ = credentials.Unmarshal(cfg) diff --git a/translator/translate/otel/extension/resourcestore/translator_test.go b/translator/translate/otel/extension/entitystore/translator_test.go similarity index 87% rename from translator/translate/otel/extension/resourcestore/translator_test.go rename to translator/translate/otel/extension/entitystore/translator_test.go index d9ace3c4ed..c07d809653 100644 --- a/translator/translate/otel/extension/resourcestore/translator_test.go +++ b/translator/translate/otel/extension/entitystore/translator_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: MIT -package resourcestore +package entitystore import ( "testing" @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/confmap" - "github.com/aws/amazon-cloudwatch-agent/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" translateagent "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" @@ -22,12 +22,12 @@ func TestTranslate(t *testing.T) { input map[string]interface{} file_exists bool profile_exists bool - want *resourcestore.Config + want *entitystore.Config }{ "OnlyProfile": { input: map[string]interface{}{}, profile_exists: true, - want: &resourcestore.Config{ + want: &entitystore.Config{ Mode: config.ModeEC2, Profile: "test_profile", }, @@ -35,7 +35,7 @@ func TestTranslate(t *testing.T) { "OnlyFile": { input: map[string]interface{}{}, file_exists: true, - want: &resourcestore.Config{ + 
want: &entitystore.Config{ Mode: config.ModeEC2, Filename: "test_file", }, @@ -52,7 +52,7 @@ func TestTranslate(t *testing.T) { translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "test_profile" } tt := NewTranslator().(*translator) - assert.Equal(t, "resourcestore", tt.ID().String()) + assert.Equal(t, "entitystore", tt.ID().String()) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) assert.NoError(t, err) diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index 152364befd..5d492dd998 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -21,7 +21,7 @@ import ( receiverAdapter "github.com/aws/amazon-cloudwatch-agent/receiver/adapter" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/resourcestore" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/applicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" @@ -87,7 +87,7 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { return nil, err } } - pipelines.Translators.Extensions.Set(resourcestore.NewTranslator()) + pipelines.Translators.Extensions.Set(entitystore.NewTranslator()) cfg := &otelcol.Config{ Receivers: map[component.ID]component.Config{}, Exporters: map[component.ID]component.Config{}, From d93a18370ad5dbff42c7e4e192cbd85175539a43 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 3 Jul 2024 10:51:46 -0400 Subject: [PATCH 37/55] Add workflow for uploading Compass agent artifacts to 
S3 bucket (#739) --- .github/workflows/compass-beta-release.yml | 18 ++++++++++++++++++ .github/workflows/test-build.yml | 10 ++++++++++ 2 files changed, 28 insertions(+) create mode 100644 .github/workflows/compass-beta-release.yml diff --git a/.github/workflows/compass-beta-release.yml b/.github/workflows/compass-beta-release.yml new file mode 100644 index 0000000000..85599bd051 --- /dev/null +++ b/.github/workflows/compass-beta-release.yml @@ -0,0 +1,18 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Compass Beta Release +on: + workflow_dispatch: +jobs: + BuildAndUpload: + uses: ./.github/workflows/test-build.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + BucketKey: "compass-beta-release" + PackageBucketKey: "compass-beta-release" + TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + Bucket: "private-cloudwatch-agent-integration-test" diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 9a4f68fa4f..9f5747f375 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -111,6 +111,16 @@ jobs: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.PASSPHRASE }} + - name: Replace AWS SDK (Linux) + run: | + mkdir ~/gosdk + aws s3 cp s3://compass-pre-release/staging.zip ~ + unzip -q -d ~/gosdk ~/staging.zip || true + sdkPath=$(echo ~/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go) + echo $sdkPath + ls + go mod edit -replace github.com/aws/aws-sdk-go=$sdkPath + - name: Build Binaries if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false run: make amazon-cloudwatch-agent-linux amazon-cloudwatch-agent-windows package-rpm package-deb package-win From 6b2f30c34014894f3472b107d2c03624bab27f5e Mon Sep 17 00:00:00 2001 From: Ben Strauss <81588812+straussb@users.noreply.github.com> 
Date: Fri, 12 Jul 2024 11:49:48 -0400 Subject: [PATCH 38/55] Update all EntityStore variable names. (#743) --- extension/entitystore/extension.go | 98 +++++++++---------- extension/entitystore/extension_test.go | 32 +++--- plugins/inputs/logfile/logfile.go | 6 +- plugins/inputs/logfile/tailersrc.go | 6 +- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 6 +- plugins/processors/awsentity/processor.go | 6 +- 6 files changed, 77 insertions(+), 77 deletions(-) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index c252681e59..9ec9cf4ed4 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -78,62 +78,62 @@ type EntityStore struct { var _ extension.Extension = (*EntityStore)(nil) -func (r *EntityStore) Start(ctx context.Context, host component.Host) error { +func (e *EntityStore) Start(ctx context.Context, host component.Host) error { // Get IMDS client and EC2 API client which requires region for authentication // These will be passed down to any object that requires access to IMDS or EC2 // API client so we have single source of truth for credential - r.done = make(chan struct{}) - r.metadataprovider = getMetaDataProvider() - r.mode = r.config.Mode + e.done = make(chan struct{}) + e.metadataprovider = getMetaDataProvider() + e.mode = e.config.Mode ec2CredentialConfig := &configaws.CredentialConfig{ - Profile: r.config.Profile, - Filename: r.config.Filename, + Profile: e.config.Profile, + Filename: e.config.Filename, } - switch r.mode { + switch e.mode { case config.ModeEC2: - r.ec2Info = *newEC2Info(r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) - go r.ec2Info.initEc2Info() + e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) + go e.ec2Info.initEc2Info() } - r.serviceprovider = newServiceProvider(r.mode, &r.ec2Info, r.metadataprovider, getEC2Provider, ec2CredentialConfig, r.done) - go r.serviceprovider.startServiceProvider() + 
e.serviceprovider = newServiceProvider(e.mode, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) + go e.serviceprovider.startServiceProvider() return nil } -func (r *EntityStore) Shutdown(_ context.Context) error { - close(r.done) +func (e *EntityStore) Shutdown(_ context.Context) error { + close(e.done) return nil } -func (r *EntityStore) Mode() string { - return r.mode +func (e *EntityStore) Mode() string { + return e.mode } -func (r *EntityStore) EKSInfo() eksInfo { - return r.eksInfo +func (e *EntityStore) EKSInfo() eksInfo { + return e.eksInfo } -func (r *EntityStore) EC2Info() ec2Info { - return r.ec2Info +func (e *EntityStore) EC2Info() ec2Info { + return e.ec2Info } -func (r *EntityStore) SetNativeCredential(client client.ConfigProvider) { - r.nativeCredential = client +func (e *EntityStore) SetNativeCredential(client client.ConfigProvider) { + e.nativeCredential = client } -func (r *EntityStore) NativeCredentialExists() bool { - return r.nativeCredential != nil +func (e *EntityStore) NativeCredentialExists() bool { + return e.nativeCredential != nil } // CreateLogFileEntity creates the entity for log events that are being uploaded from a log file in the environment. 
-func (r *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Entity { - if !r.shouldReturnEntity() { +func (e *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Entity { + if !e.shouldReturnEntity() { return nil } - serviceAttr := r.serviceprovider.logFileServiceAttribute(logFileGlob, logGroupName) + serviceAttr := e.serviceprovider.logFileServiceAttribute(logFileGlob, logGroupName) - keyAttributes := r.createServiceKeyAttributes(serviceAttr) - attributeMap := r.createAttributeMap() + keyAttributes := e.createServiceKeyAttributes(serviceAttr) + attributeMap := e.createAttributeMap() addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) return &cloudwatchlogs.Entity{ @@ -143,9 +143,9 @@ func (r *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName } // AddServiceAttrEntryForLogFile adds an entry to the entity store for the provided file glob -> (serviceName, environmentName) key-value pair -func (r *EntityStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { - if r.serviceprovider != nil { - r.serviceprovider.addEntryForLogFile(fileGlob, ServiceAttribute{ +func (e *EntityStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { + if e.serviceprovider != nil { + e.serviceprovider.addEntryForLogFile(fileGlob, ServiceAttribute{ ServiceName: serviceName, ServiceNameSource: ServiceNameSourceUserConfiguration, Environment: environmentName, @@ -154,22 +154,22 @@ func (r *EntityStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, servic } // AddServiceAttrEntryForLogGroup adds an entry to the entity store for the provided log group nme -> (serviceName, environmentName) key-value pair -func (r *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, serviceName string, environmentName string) { - 
r.serviceprovider.addEntryForLogGroup(logGroupName, ServiceAttribute{ +func (e *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, serviceName string, environmentName string) { + e.serviceprovider.addEntryForLogGroup(logGroupName, ServiceAttribute{ ServiceName: serviceName, ServiceNameSource: ServiceNameSourceInstrumentation, Environment: environmentName, }) } -func (r *EntityStore) createAttributeMap() map[string]*string { +func (e *EntityStore) createAttributeMap() map[string]*string { attributeMap := make(map[string]*string) - if r.mode == config.ModeEC2 { - addNonEmptyToMap(attributeMap, InstanceIDKey, r.ec2Info.InstanceID) - addNonEmptyToMap(attributeMap, ASGKey, r.ec2Info.AutoScalingGroup) + if e.mode == config.ModeEC2 { + addNonEmptyToMap(attributeMap, InstanceIDKey, e.ec2Info.InstanceID) + addNonEmptyToMap(attributeMap, ASGKey, e.ec2Info.AutoScalingGroup) } - switch r.mode { + switch e.mode { case config.ModeEC2: attributeMap[PlatformType] = aws.String(EC2PlatForm) } @@ -177,7 +177,7 @@ func (r *EntityStore) createAttributeMap() map[string]*string { } // createServiceKeyAttribute creates KeyAttributes for Service entities -func (r *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) map[string]*string { +func (e *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) map[string]*string { serviceKeyAttr := map[string]*string{ Type: aws.String(Service), } @@ -188,28 +188,28 @@ func (r *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) m // shouldReturnEntity checks if the account ID for the instance is // matching the account ID when assuming role for the current credential. 
-func (r *EntityStore) shouldReturnEntity() bool { - if r.nativeCredential == nil || r.metadataprovider == nil { - r.logger.Debug("there is no credential stored for cross-account checks") +func (e *EntityStore) shouldReturnEntity() bool { + if e.nativeCredential == nil || e.metadataprovider == nil { + e.logger.Debug("there is no credential stored for cross-account checks") return false } - doc, err := r.metadataprovider.Get(context.Background()) + doc, err := e.metadataprovider.Get(context.Background()) if err != nil { - r.logger.Debug("an error occurred when getting instance document for cross-account checks. Reason: %v\n", zap.Error(err)) + e.logger.Debug("an error occurred when getting instance document for cross-account checks. Reason: %v\n", zap.Error(err)) return false } instanceAccountID := doc.AccountID - if r.stsClient == nil { - r.stsClient = sts.New( - r.nativeCredential, + if e.stsClient == nil { + e.stsClient = sts.New( + e.nativeCredential, &aws.Config{ LogLevel: configaws.SDKLogLevel(), Logger: configaws.SDKLogger{}, }) } - assumedRoleIdentity, err := r.stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + assumedRoleIdentity, err := e.stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) if err != nil { - r.logger.Debug("an error occurred when calling STS GetCallerIdentity for cross-account checks. Reason: ", zap.Error(err)) + e.logger.Debug("an error occurred when calling STS GetCallerIdentity for cross-account checks. 
Reason: ", zap.Error(err)) return false } return instanceAccountID == *assumedRoleIdentity.Account diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index d96a5a91ca..7b4a84256f 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -112,10 +112,10 @@ func TestEntityStore_EC2Info(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &EntityStore{ + e := &EntityStore{ ec2Info: tt.ec2InfoInput, } - if got := r.EC2Info(); !reflect.DeepEqual(got, tt.want) { + if got := e.EC2Info(); !reflect.DeepEqual(got, tt.want) { t.Errorf("EC2Info() = %v, want %v", got, tt.want) } }) @@ -132,10 +132,10 @@ func TestEntityStore_Mode(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &EntityStore{ + e := &EntityStore{ mode: tt.modeInput, } - if got := r.Mode(); got != tt.want { + if got := e.Mode(); got != tt.want { t.Errorf("Mode() = %v, want %v", got, tt.want) } }) @@ -227,11 +227,11 @@ func TestEntityStore_createAttributeMaps(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &EntityStore{ + e := &EntityStore{ ec2Info: tt.fields.ec2Info, mode: tt.fields.mode, } - assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createAttributeMap()), "createAttributeMap()") + assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(e.createAttributeMap()), "createAttributeMap()") }) } } @@ -270,8 +270,8 @@ func TestEntityStore_createServiceKeyAttributes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &EntityStore{} - assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(r.createServiceKeyAttributes(tt.serviceAttr)), "createServiceKeyAttributes()") + e := &EntityStore{} + assert.Equalf(t, dereferenceMap(tt.want), dereferenceMap(e.createServiceKeyAttributes(tt.serviceAttr)), "createServiceKeyAttributes()") }) } } @@ -288,7 +288,7 @@ func 
TestEntityStore_createLogFileRID(t *testing.T) { } sp := new(mockServiceProvider) sp.On("logFileServiceAttribute", glob, group).Return(serviceAttr) - rs := EntityStore{ + e := EntityStore{ mode: config.ModeEC2, ec2Info: ec2Info{InstanceID: instanceId}, serviceprovider: sp, @@ -297,7 +297,7 @@ func TestEntityStore_createLogFileRID(t *testing.T) { nativeCredential: &session.Session{}, } - entity := rs.CreateLogFileEntity(glob, group) + entity := e.CreateLogFileEntity(glob, group) expectedEntity := cloudwatchlogs.Entity{ KeyAttributes: map[string]*string{ @@ -348,12 +348,12 @@ func TestEntityStore_shouldReturnRID(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &EntityStore{ + e := &EntityStore{ metadataprovider: tt.fields.metadataprovider, stsClient: tt.fields.stsClient, nativeCredential: tt.fields.nativeCredential, } - assert.Equalf(t, tt.want, r.shouldReturnEntity(), "shouldReturnEntity()") + assert.Equalf(t, tt.want, e.shouldReturnEntity(), "shouldReturnEntity()") }) } } @@ -372,7 +372,7 @@ func dereferenceMap(input map[string]*string) map[string]string { func TestEntityStore_addServiceAttrEntryForLogFile(t *testing.T) { sp := new(mockServiceProvider) - rs := EntityStore{serviceprovider: sp} + e := EntityStore{serviceprovider: sp} key := LogFileGlob("/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log") serviceAttr := ServiceAttribute{ @@ -381,14 +381,14 @@ func TestEntityStore_addServiceAttrEntryForLogFile(t *testing.T) { Environment: "ec2:test", } sp.On("addEntryForLogFile", key, serviceAttr).Return() - rs.AddServiceAttrEntryForLogFile(key, "test", "ec2:test") + e.AddServiceAttrEntryForLogFile(key, "test", "ec2:test") sp.AssertExpectations(t) } func TestEntityStore_addServiceAttrEntryForLogGroup(t *testing.T) { sp := new(mockServiceProvider) - rs := EntityStore{serviceprovider: sp} + e := EntityStore{serviceprovider: sp} key := LogGroupName("TestLogGroup") serviceAttr := ServiceAttribute{ @@ -397,7 +397,7 @@ 
func TestEntityStore_addServiceAttrEntryForLogGroup(t *testing.T) { Environment: "ec2:test", } sp.On("addEntryForLogGroup", key, serviceAttr).Return() - rs.AddServiceAttrEntryForLogGroup(key, "test", "ec2:test") + e.AddServiceAttrEntryForLogGroup(key, "test", "ec2:test") sp.AssertExpectations(t) } diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index fde171135e..1626957ebc 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -153,15 +153,15 @@ func (t *LogFile) FindLogSrc() []logs.LogSrc { t.cleanUpStoppedTailerSrc() - rs := entitystore.GetEntityStore() + es := entitystore.GetEntityStore() // Create a "tailer" for each file for i := range t.FileConfig { fileconfig := &t.FileConfig[i] //Add file -> {serviceName, deploymentEnvironment} mapping to entity store - if rs != nil { - rs.AddServiceAttrEntryForLogFile(entitystore.LogFileGlob(fileconfig.FilePath), fileconfig.ServiceName, fileconfig.Environment) + if es != nil { + es.AddServiceAttrEntryForLogFile(entitystore.LogFileGlob(fileconfig.FilePath), fileconfig.ServiceName, fileconfig.Environment) } targetFiles, err := t.getTargetFiles(fileconfig) diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 6e9c58c7fb..4ff0e40b08 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -171,9 +171,9 @@ func (ts *tailerSrc) AddCleanUpFn(f func()) { } func (ts *tailerSrc) Entity() *cloudwatchlogs.Entity { - rs := entitystore.GetEntityStore() - if rs != nil { - return rs.CreateLogFileEntity(entitystore.LogFileGlob(ts.fileGlobPath), entitystore.LogGroupName(ts.group)) + es := entitystore.GetEntityStore() + if es != nil { + return es.CreateLogFileEntity(entitystore.LogFileGlob(ts.fileGlobPath), entitystore.LogGroupName(ts.group)) } return nil } diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 2dc8bb3a46..d7c36a208c 
100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -135,9 +135,9 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { Filename: c.Filename, Token: c.Token, } - entitystore := entitystore.GetEntityStore() - if entitystore != nil && !entitystore.NativeCredentialExists() { - entitystore.SetNativeCredential(credentialConfig.Credentials()) + es := entitystore.GetEntityStore() + if es != nil && !es.NativeCredentialExists() { + es.SetNativeCredential(credentialConfig.Credentials()) } if cwd, ok := c.cwDests[t]; ok { return cwd diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 2f166962bd..9814e4abba 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -21,11 +21,11 @@ const ( // exposed as a variable for unit testing var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName string, environmentName string) { - rs := entitystore.GetEntityStore() - if rs == nil { + es := entitystore.GetEntityStore() + if es == nil { return } - rs.AddServiceAttrEntryForLogGroup(logGroupName, serviceName, environmentName) + es.AddServiceAttrEntryForLogGroup(logGroupName, serviceName, environmentName) } // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or From 54d8d40742727eab23c15b033dc381eaaeaba751 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 12 Jul 2024 14:23:36 -0400 Subject: [PATCH 39/55] Remove service name source from entity KeyAttributes (#745) --- plugins/outputs/cloudwatchlogs/pusher.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 33d3ba052e..23d74a40b1 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -14,6 +14,7 @@ 
import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/profiler" ) @@ -223,6 +224,10 @@ func (p *pusher) send() { var entity *cloudwatchlogs.Entity if p.logSrc != nil { entity = p.logSrc.Entity() + // The following logics should be removed after Compass GA1 + if entity != nil && entity.Attributes != nil { + delete(entity.Attributes, entitystore.ServiceNameSourceKey) + } } input := &cloudwatchlogs.PutLogEventsInput{ From 14842919d1eb1a7e05843012ed9600c8fcb8629a Mon Sep 17 00:00:00 2001 From: Ben Strauss <81588812+straussb@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:54:32 -0400 Subject: [PATCH 40/55] Only populate Entity in PutLogEvents calls in us-east-1 for now. (#749) The CloudWatch Logs deployment has only reached there and we want to prevent accidental errors in other regions. --- plugins/outputs/cloudwatchlogs/cloudwatchlogs.go | 2 +- plugins/outputs/cloudwatchlogs/pusher.go | 8 ++++++-- plugins/outputs/cloudwatchlogs/pusher_test.go | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index d7c36a208c..aa9d1474b3 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -167,7 +167,7 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { c.Log.Info("Configured middleware on AWS client") } } - pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, logSrc) + pusher := NewPusher(c.Region, t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, logSrc) cwd := &cwDest{pusher: pusher, retryer: logThrottleRetryer} c.cwDests[t] = cwd return cwd diff --git 
a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 23d74a40b1..ed84c7a407 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -44,6 +44,7 @@ type pusher struct { RetryDuration time.Duration Log telegraf.Logger + region string logSrc logs.LogSrc events []*cloudwatchlogs.InputLogEvent minT, maxT *time.Time @@ -65,13 +66,14 @@ type pusher struct { wg *sync.WaitGroup } -func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, logSrc logs.LogSrc) *pusher { +func NewPusher(region string, target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, logSrc logs.LogSrc) *pusher { p := &pusher{ Target: target, Service: service, FlushTimeout: flushTimeout, RetryDuration: retryDuration, Log: logger, + region: region, logSrc: logSrc, events: make([]*cloudwatchlogs.InputLogEvent, 0, 10), eventsCh: make(chan logs.LogEvent, 100), @@ -235,7 +237,9 @@ func (p *pusher) send() { LogGroupName: &p.Group, LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, - Entity: entity, + } + if p.region == "us-east-1" { + input.Entity = entity } startTime := time.Now() diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index cf5ccc166a..3545b00ef8 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -796,6 +796,6 @@ func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { func testPreparation(retention int, s *svcMock, flushTimeout time.Duration, retryDuration time.Duration) (chan struct{}, *pusher) { stop := make(chan struct{}) mockLogSrcObj := &mockLogSrc{} - p := NewPusher(Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, 
retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, mockLogSrcObj) + p := NewPusher("us-east-1", Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, mockLogSrcObj) return stop, p } From b0865e6e6d578d61412a999fc296a92b62deaec9 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:56:48 -0400 Subject: [PATCH 41/55] Add Compass integration test for entity association (#748) --- .github/workflows/integration-test.yml | 74 +++- test/README.md | 11 + test/compass/compass_test.go | 342 ++++++++++++++++++ .../resources/compass_default_log.json | 22 ++ .../resources/compass_service_in_config.json | 23 ++ test/go.mod | 58 +++ test/go.sum | 119 ++++++ 7 files changed, 648 insertions(+), 1 deletion(-) create mode 100644 test/README.md create mode 100644 test/compass/compass_test.go create mode 100644 test/compass/resources/compass_default_log.json create mode 100644 test/compass/resources/compass_service_in_config.json create mode 100644 test/go.mod create mode 100644 test/go.sum diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 4c806cba83..055e8ec90c 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -1354,4 +1354,76 @@ jobs: permissions: id-token: write contents: read - secrets: inherit \ No newline at end of file + secrets: inherit + + CompassLinuxIntegrationTest: + needs: [ BuildAndUpload ] + name: 'CompassLinuxIntegrationTest' + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - name: Checkout CWA Test + uses: actions/checkout@v3 + with: + repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} + ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ 
env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-east-1 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Cache if success + id: compass-integration-test + uses: actions/cache@v3 + with: + path: go.mod + key: compass-integration-test-${{ github.sha }} + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + if: steps.compass-integration-test.outputs.cache-hit != 'true' + uses: nick-fields/retry@v2 + env: + TF_VAR_test_name: compass + TF_VAR_ssh_key_name: ${{ env.KEY_NAME }} + TF_VAR_ssh_key_value: ${{ env.PRIVATE_KEY }} + TF_VAR_user: ec2-user + TF_VAR_ami: cloudwatch-agent-integration-test-al2* + TF_VAR_arc: amd64 + TF_VAR_ec2_instance_type: t3a.medium + TF_VAR_github_test_repo: ${{ env.CWA_GITHUB_TEST_REPO_URL }} + TF_VAR_github_test_repo_branch: ${{ env.CWA_GITHUB_TEST_REPO_BRANCH }} + TF_VAR_cwa_github_sha: ${{ github.sha }} + TF_VAR_s3_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + TF_VAR_binary_name: amazon-cloudwatch-agent.rpm + TF_VAR_install_agent: go run ./install/install_agent.go rpm + TF_VAR_pre_test_setup: | + git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/aws/private-amazon-cloudwatch-agent-staging.git compass + cd compass + git checkout ${{ github.sha }} + cd test + with: + max_attempts: 3 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/ec2/linux + terraform init + if terraform apply --auto-approve -var="test_dir=./compass" -var="region=us-east-1" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/ec2/linux && terraform destroy --auto-approve \ No newline at end of file diff --git a/test/README.md b/test/README.md new file mode 100644 index 0000000000..8676abc5db --- /dev/null 
+++ b/test/README.md @@ -0,0 +1,11 @@ +## Private Integration Tests +The `test` module is meant to serve as a place for integration tests that cannot be placed in the external `amazon-cloudwatch-agent-test` repo. +These follow the pattern established by the external test repo and import dependencies from it to reuse as much as possible. Therefore, there are +a few requirements that are needed before running the tests. + +### Base Requirements +- GoLang 1.22+ +- A built and installed version of the agent from this repo + +### Compass +The compass integration tests. Verifies that PutLogEvents calls are attached with entities by the agent. \ No newline at end of file diff --git a/test/compass/compass_test.go b/test/compass/compass_test.go new file mode 100644 index 0000000000..6343ef05e7 --- /dev/null +++ b/test/compass/compass_test.go @@ -0,0 +1,342 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package compass + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + cwlTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent-test/environment" + "github.com/aws/amazon-cloudwatch-agent-test/util/awsservice" + "github.com/aws/amazon-cloudwatch-agent-test/util/common" +) + +const ( + configOutputPath = "/opt/aws/amazon-cloudwatch-agent/bin/config.json" + logLineId1 = "foo" + logLineId2 = "bar" + logFilePath = "/tmp/cwagent_log_test.log" + sleepForFlush = 60 * time.Second + retryWaitTime = 30 * time.Second + cwlPerfEndpoint = "https://logs-perf.us-east-1.amazonaws.com" + iadRegionalCode = "us-east-1" + + entityType 
= "@entity.KeyAttributes.Type" + entityName = "@entity.KeyAttributes.Name" + entityEnvironment = "@entity.KeyAttributes.Environment" + + entityPlatform = "@entity.Attributes.PlatformType" + entityInstanceId = "@entity.Attributes.EC2.InstanceId" +) + +var ( + logLineIds = []string{logLineId1, logLineId2} + rnf *cwlTypes.ResourceNotFoundException + cwlClient *cloudwatchlogs.Client + ec2Client *ec2.Client +) + +type expectedEntity struct { + entityType string + name string + environment string + platformType string + instanceId string +} + +func init() { + environment.RegisterEnvironmentMetaDataFlags() + awsCfg, err := config.LoadDefaultConfig( + context.Background(), + config.WithRegion(iadRegionalCode), + ) + if err != nil { + // handle error + fmt.Println("There was an error trying to load default config: ", err) + return + } + + cwlClient = cloudwatchlogs.NewFromConfig(awsCfg, func(o *cloudwatchlogs.Options) { + o.BaseEndpoint = aws.String(cwlPerfEndpoint) + }) + ec2Client = ec2.NewFromConfig(awsCfg) + +} + +// TestWriteLogsToCloudWatch writes N number of logs, and then validates that the +// log events are associated with entities from CloudWatch Logs +func TestWriteLogsToCloudWatch(t *testing.T) { + // this uses the {instance_id} placeholder in the agent configuration, + // so we need to determine the host's instance ID for validation + instanceId := awsservice.GetInstanceId() + log.Printf("Found instance id %s", instanceId) + + defer awsservice.DeleteLogGroupAndStream(instanceId, instanceId) + + testCases := map[string]struct { + agentConfigPath string + iterations int + useEC2Tag bool + expectedEntity expectedEntity + }{ + "Compass/IAMRole": { + agentConfigPath: filepath.Join("resources", "compass_default_log.json"), + iterations: 1000, + expectedEntity: expectedEntity{ + entityType: "Service", + name: "cwa-e2e-iam-instance-profile", + environment: "ec2:default", + platformType: "AWS::EC2", + instanceId: instanceId, + }, + }, + "Compass/EC2Tags": { + 
agentConfigPath: filepath.Join("resources", "compass_default_log.json"), + iterations: 1000, + useEC2Tag: true, + expectedEntity: expectedEntity{ + entityType: "Service", + name: "compass-service-test", + environment: "ec2:default", + platformType: "AWS::EC2", + instanceId: instanceId, + }, + }, + "Compass/ServiceInConfig": { + agentConfigPath: filepath.Join("resources", "compass_service_in_config.json"), + iterations: 1000, + expectedEntity: expectedEntity{ + entityType: "Service", + name: "compass-service", + environment: "compass-environment", + platformType: "AWS::EC2", + instanceId: instanceId, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.useEC2Tag { + input := &ec2.CreateTagsInput{ + Resources: []string{instanceId}, + Tags: []ec2Types.Tag{ + { + Key: aws.String("service"), + Value: aws.String("compass-service-test"), + }, + }, + } + _, err := ec2Client.CreateTags(context.TODO(), input) + assert.NoError(t, err) + } + id := uuid.New() + f, err := os.Create(logFilePath + "-" + id.String()) + if err != nil { + t.Fatalf("Error occurred creating log file for writing: %v", err) + } + common.DeleteFile(common.AgentLogFile) + common.TouchFile(common.AgentLogFile) + + common.CopyFile(testCase.agentConfigPath, configOutputPath) + + common.StartAgent(configOutputPath, true, false) + + // ensure that there is enough time from the "start" time and the first log line, + // so we don't miss it in the GetLogEvents call + writeLogLines(t, f, testCase.iterations) + time.Sleep(sleepForFlush) + common.StopAgent() + end := time.Now() + + // check CWL to ensure we got the expected entities in the log group + ValidateEntity(t, instanceId, instanceId, &end, testCase.expectedEntity) + + f.Close() + os.Remove(logFilePath + "-" + id.String()) + }) + } +} + +func writeLogLines(t *testing.T, f *os.File, iterations int) { + log.Printf("Writing %d lines to %s", iterations*len(logLineIds), f.Name()) + + for i := 0; i < iterations; 
i++ { + ts := time.Now() + for _, id := range logLineIds { + _, err := f.WriteString(fmt.Sprintf("%s - [%s] #%d This is a log line.\n", ts.Format(time.StampMilli), id, i)) + if err != nil { + // don't need to fatal error here. if a log line doesn't get written, the count + // when validating the log stream should be incorrect and fail there. + t.Logf("Error occurred writing log line: %v", err) + } + } + time.Sleep(30 * time.Millisecond) + } +} + +// ValidateLogs queries a given LogGroup/LogStream combination given the start and end times, and executes an +// arbitrary validator function on the found logs. +func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity) { + log.Printf("Checking log group/stream: %s/%s", logGroup, logStream) + + logGroupInfo, err := getLogGroup() + for _, lg := range logGroupInfo { + if *lg.LogGroupName == logGroup { + log.Println("Log group " + *lg.LogGroupName + " exists") + break + } + } + assert.NoError(t, err) + begin := end.Add(-sleepForFlush * 2) + log.Printf("Start time is " + begin.String() + " and end time is " + end.String()) + queryId, err := getLogQueryId(logGroup, &begin, end) + assert.NoError(t, err) + log.Printf("queryId is " + *queryId) + result, err := getQueryResult(queryId) + assert.NoError(t, err) + if !assert.NotZero(t, len(result)) { + return + } + requiredEntityFields := map[string]bool{ + entityType: false, + entityName: false, + entityEnvironment: false, + entityPlatform: false, + entityInstanceId: false, + } + for _, field := range result[0] { + switch aws.ToString(field.Field) { + case entityType: + requiredEntityFields[entityType] = true + assert.Equal(t, expectedEntity.entityType, aws.ToString(field.Value)) + case entityName: + requiredEntityFields[entityName] = true + assert.Equal(t, expectedEntity.name, aws.ToString(field.Value)) + case entityEnvironment: + requiredEntityFields[entityEnvironment] = true + assert.Equal(t, expectedEntity.environment, 
aws.ToString(field.Value)) + case entityPlatform: + requiredEntityFields[entityPlatform] = true + assert.Equal(t, expectedEntity.platformType, aws.ToString(field.Value)) + case entityInstanceId: + requiredEntityFields[entityInstanceId] = true + assert.Equal(t, expectedEntity.instanceId, aws.ToString(field.Value)) + + } + fmt.Printf("%s: %s\n", aws.ToString(field.Field), aws.ToString(field.Value)) + } + allEntityFieldsFound := true + for _, value := range requiredEntityFields { + if !value { + allEntityFieldsFound = false + } + } + assert.True(t, allEntityFieldsFound) +} + +func getLogQueryId(logGroup string, since, until *time.Time) (*string, error) { + var queryId *string + params := &cloudwatchlogs.StartQueryInput{ + QueryString: aws.String("fields @message, @entity.KeyAttributes.Type, @entity.KeyAttributes.Name, @entity.KeyAttributes.Environment, @entity.Attributes.PlatformType, @entity.Attributes.EC2.InstanceId"), + LogGroupName: aws.String(logGroup), + } + if since != nil { + params.StartTime = aws.Int64(since.UnixMilli()) + } + if until != nil { + params.EndTime = aws.Int64(until.UnixMilli()) + } + attempts := 0 + + for { + output, err := cwlClient.StartQuery(context.Background(), params) + attempts += 1 + + if err != nil { + if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { + // The log group/stream hasn't been created yet, so wait and retry + time.Sleep(retryWaitTime) + continue + } + + // if the error is not a ResourceNotFoundException, we should fail here. 
+ return queryId, err + } + queryId = output.QueryId + return queryId, err + } +} + +func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { + attempts := 0 + var results [][]cwlTypes.ResultField + params := &cloudwatchlogs.GetQueryResultsInput{ + QueryId: aws.String(*queryId), + } + for { + if attempts > awsservice.StandardRetries { + return results, errors.New("exceeded retry count") + } + result, err := cwlClient.GetQueryResults(context.Background(), params) + log.Printf("GetQueryResult status is: %v", result.Status) + attempts += 1 + if result.Status != cwlTypes.QueryStatusComplete { + log.Printf("GetQueryResult: sleeping for 5 seconds until status is complete") + time.Sleep(5 * time.Second) + continue + } + log.Printf("GetQueryResult: result length is %d", len(result.Results)) + if err != nil { + if errors.As(err, &rnf) { + // The log group/stream hasn't been created yet, so wait and retry + time.Sleep(retryWaitTime) + continue + } + + // if the error is not a ResourceNotFoundException, we should fail here. + return results, err + } + results = result.Results + return results, err + } +} + +func getLogGroup() ([]cwlTypes.LogGroup, error) { + attempts := 0 + var logGroups []cwlTypes.LogGroup + params := &cloudwatchlogs.DescribeLogGroupsInput{} + for { + output, err := cwlClient.DescribeLogGroups(context.Background(), params) + + attempts += 1 + + if err != nil { + if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { + // The log group/stream hasn't been created yet, so wait and retry + time.Sleep(retryWaitTime) + continue + } + + // if the error is not a ResourceNotFoundException, we should fail here. 
+ return logGroups, err + } + logGroups = output.LogGroups + return logGroups, err + } +} diff --git a/test/compass/resources/compass_default_log.json b/test/compass/resources/compass_default_log.json new file mode 100644 index 0000000000..a4b3c40c35 --- /dev/null +++ b/test/compass/resources/compass_default_log.json @@ -0,0 +1,22 @@ +{ + "agent": { + "run_as_user": "root", + "debug": true + }, + "logs": { + "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", + "force_flush_interval": 1, + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/tmp/cwagent_log_test.log*", + "log_group_name": "{instance_id}", + "log_stream_name": "{instance_id}", + "timezone": "UTC" + } + ] + } + } + } +} diff --git a/test/compass/resources/compass_service_in_config.json b/test/compass/resources/compass_service_in_config.json new file mode 100644 index 0000000000..e66a91f9a3 --- /dev/null +++ b/test/compass/resources/compass_service_in_config.json @@ -0,0 +1,23 @@ +{ + "agent": { + "run_as_user": "root", + "debug": true + }, + "logs": { + "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/tmp/cwagent_log_test.log*", + "log_group_name": "{instance_id}", + "log_stream_name": "{instance_id}", + "timezone": "UTC", + "service.name": "compass-service", + "deployment.environment": "compass-environment" + } + ] + } + } + } +} diff --git a/test/go.mod b/test/go.mod new file mode 100644 index 0000000000..fc4978f595 --- /dev/null +++ b/test/go.mod @@ -0,0 +1,58 @@ +module github.com/aws/private-amazon-cloudwatch-agent-staging/test + +go 1.22.4 + +require ( + github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc + github.com/aws/aws-sdk-go-v2 v1.23.5 + github.com/aws/aws-sdk-go-v2/config v1.25.11 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 + github.com/stretchr/testify v1.8.4 +) + +require ( + collectd.org v0.5.0 // indirect + 
github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect + github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 // indirect + github.com/aws/smithy-go v1.18.1 // indirect + 
github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prozz/aws-embedded-metrics-golang v1.2.0 // indirect + github.com/qri-io/jsonpointer v0.1.1 // indirect + github.com/qri-io/jsonschema v0.2.1 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/tools v0.16.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/test/go.sum b/test/go.sum new file mode 100644 index 0000000000..fa1e4997a1 --- /dev/null +++ b/test/go.sum @@ -0,0 +1,119 @@ +collectd.org v0.5.0 h1:mRTLdljvxJNXPMMO9RSxf0PANDAqu/Tz+I6Dt6OjB28= +collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc h1:oC0cgVlspqNbwRKk9Zk9zweYKZcjnW48Hwp0isLh1Co= +github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc/go.mod h1:E/w/idAjJTY+laomuWIO8wCE8Rtq3hSA2sVeNeV+YGA= +github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= +github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 h1:Zx9+31KyB8wQna6SXFWOewlgoY5uGdDAu6PTOEU3OQI= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3/go.mod 
h1:zxbEJhRdKTH1nqS2qu6UJ7zGe25xaHxZXaC2CvuQFnA= +github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno= +github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s= +github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0= +github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 h1:/KXnrU9g/RzJwJKuZ7G635w9segJCpg9OIwkjPYZs7g= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9/go.mod h1:i6u5850nH0SFslKYMUVLW8Uc+JgEdpx4XHNA7T1S2C0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 h1:TUCNKBd4/JEefsZDxo5deRmrRRPZHqGyBYiUAeBKOWU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4/go.mod h1:egDkcl+zsgFqS6VO142bKboip5Pe1sNMwN55Xy38QsM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 h1:abKT+RuM1sdCNZIGIfZpLkvxEX3Rpsto019XG/rkYG8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8/go.mod h1:Owc4ysUE71JSruVTTa3h4f2pp3E4hlcAtmeNXxDmjj4= 
+github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 h1:Eub6qmSRH5ahS1zhVLa1i1qT3raC9Sxrn2kgtG19J3I= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0/go.mod h1:ehWDbgXo5Zy6eLjP+xX+Vf8wXaSyLGeRf6KlvoVAaXk= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 h1:HWB+RXvOQQkhEp8QCpTlgullbCiysRQlo6ulVZRBBtM= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2/go.mod h1:YHhAfr9Qd5xd0fLT2B7LxDFWbIZ6RbaI81Hu2ASCiTY= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 h1:pq1AgSc6YRDkT3/iuXgPUPL0ArmdEmjPoAl0YEJZ4d4= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2/go.mod h1:ZGxc+lOwUVsyeKrneIf8/hhowNgyqvCcwmLU/Hrscbk= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 h1:Ytz7+VR04GK7wF1C+yQScMZ4Q01xeL4EbQ4kOQ8HY1c= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3/go.mod h1:qqiIi0EbEEovHG/nQXYGAXcVvHPaUg7KMwh3VARzQz4= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 h1:/zmckWK6/SL9MTnCD8p2vOEmOT+LFQtXeoo/bTRBa3c= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2/go.mod h1:Wkk+2ZcFVCqnuf/yXjvSlySsoy5l2RSFfv/ikosEv3M= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 h1:e3Imv1oXz+W3Tfclflkh72t5TUPUwWdkHP7ctQGk8Dc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2/go.mod h1:d1hAqgLDOPaSO1Piy/0bBmj6oAplFwv6p0cquHntNHM= +github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 h1:yIr1T8uPhZT2cKCBeO39utfzG/RKJn3SxbuBOdj18Nc= +github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2/go.mod h1:MvDz+yXfa2sSEfHB57rdf83deKJIeKEopqHFhVmaRlk= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 h1:xyfOAYV/ujzZOo01H9+OnyeiRKmTEp6EsITTsmq332Q= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8/go.mod h1:coLeQEoKzW9ViTL2bn0YUlU7K0RYjivKudG74gtd+sI= 
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 h1:Vn/qqsXxe3JEALfoU6ypVt86fb811wKqv4kdxvAUk/Q= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9/go.mod h1:TQYzeHkuQrsz/AsxxK96CYJO4KRd4E6QozqktOR2h3w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 h1:ip5ia3JOXl4OAsqeTdrOOmqKgoWiu+t9XSOnRzBwmRs= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8/go.mod h1:kE+aERnK9VQIw1vrk7ElAvhCsgLNzGyCPNg2Qe4Eq4c= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 h1:DLSAG8zpJV2pYsU+UPkj1IEZghyBnnUsvIRs6UuXSDU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2/go.mod h1:thjZng67jGsvMyVZnSxlcqKyLwB0XTG8bHIRZPTJ+Bs= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 h1:lmdmYCvG1EJKGLEsUsYDNO6MwZyBZROrRg04Vrb5TwA= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2/go.mod h1:pHJ1md/3F3WkYfZ4JKOllPfXQi4NiWk7NxbeOD53HQc= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 h1:mFHM/R2FYnCkmUB52SqJncU5TWDCfI55uXlNTp96g3Y= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.2/go.mod h1:zz5H6SRVFHj93yt3lxA8Ql63c/pY90YjNvvalulrCTk= +github.com/aws/smithy-go v1.18.1 
h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= +github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/kinbiko/jsonassert v1.0.1 h1:8gdLmUaPWuxk2TzQSofKRqatFH6zwTF6AsUH4bugJYY= +github.com/kinbiko/jsonassert v1.0.1/go.mod h1:QRwBwiAsrcJpjw+L+Q4WS8psLxuUY+HylVZS/4j74TM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prozz/aws-embedded-metrics-golang v1.2.0 h1:b/LFb8J9LbgANow/9nYZE3M3bkb457/dj0zAB3hPyvo= +github.com/prozz/aws-embedded-metrics-golang v1.2.0/go.mod h1:MXOqF9cJCEHjj77LWq7NWK44/AOyaFzwmcAYqR3057M= +github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= +github.com/qri-io/jsonpointer 
v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= +github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= +github.com/qri-io/jsonschema v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= 
+golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 8d88e2f9c36d9fc4d2854d99964412fbf573f9b7 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 24 Jul 2024 14:26:04 -0400 Subject: [PATCH 42/55] Add cross-account integration tests for Compass features (#751) --- test/compass/compass_test.go | 229 ++++++++++++++++-- .../resources/compass_role_arn_check.json | 24 ++ 2 files changed, 231 insertions(+), 22 deletions(-) create mode 100644 test/compass/resources/compass_role_arn_check.json diff --git a/test/compass/compass_test.go b/test/compass/compass_test.go index 6343ef05e7..711674c505 100644 --- a/test/compass/compass_test.go +++ b/test/compass/compass_test.go @@ -10,15 +10,18 @@ import ( "log" "os" "path/filepath" + "strings" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" cwlTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -41,15 +44,18 @@ const ( entityName = "@entity.KeyAttributes.Name" entityEnvironment = "@entity.KeyAttributes.Environment" - entityPlatform = "@entity.Attributes.PlatformType" - 
entityInstanceId = "@entity.Attributes.EC2.InstanceId" + entityPlatform = "@entity.Attributes.PlatformType" + entityInstanceId = "@entity.Attributes.EC2.InstanceId" + credsDir = "/tmp/.aws" + testAccountRoleArn = "arn:aws:iam::874389809020:role/CloudWatchAgentServerRole" ) var ( - logLineIds = []string{logLineId1, logLineId2} - rnf *cwlTypes.ResourceNotFoundException - cwlClient *cloudwatchlogs.Client - ec2Client *ec2.Client + logLineIds = []string{logLineId1, logLineId2} + rnf *cwlTypes.ResourceNotFoundException + cwlClient *cloudwatchlogs.Client + crossAccountLogClient *cloudwatchlogs.Client + ec2Client *ec2.Client ) type expectedEntity struct { @@ -77,6 +83,28 @@ func init() { }) ec2Client = ec2.NewFromConfig(awsCfg) + // Initialize STS client for cross-account checks + stsClient := sts.NewFromConfig(awsCfg) + + // Assume the role in the target account + appCreds := stscreds.NewAssumeRoleProvider(stsClient, testAccountRoleArn) + + // Create a new configuration using the assumed role credentials + assumedCfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion(iadRegionalCode), + config.WithCredentialsProvider( + appCreds, + ), + ) + if err != nil { + log.Fatalf("unable to load assumed role config, %v", err) + } + + // Create a CloudWatch Logs client with the assumed role credentials + crossAccountLogClient = cloudwatchlogs.NewFromConfig(assumedCfg, func(o *cloudwatchlogs.Options) { + o.BaseEndpoint = aws.String(cwlPerfEndpoint) + }) + } // TestWriteLogsToCloudWatch writes N number of logs, and then validates that the @@ -87,7 +115,13 @@ func TestWriteLogsToCloudWatch(t *testing.T) { instanceId := awsservice.GetInstanceId() log.Printf("Found instance id %s", instanceId) - defer awsservice.DeleteLogGroupAndStream(instanceId, instanceId) + err := ResetProfile() + // Truncate the common-config so we don't use the profile credential + if err != nil { + log.Fatalf("Error truncating file: %s", err) + } + + defer DeleteLogGroupAndStream(cwlClient, 
instanceId, instanceId) testCases := map[string]struct { agentConfigPath string @@ -132,6 +166,7 @@ func TestWriteLogsToCloudWatch(t *testing.T) { } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { + if testCase.useEC2Tag { input := &ec2.CreateTagsInput{ Resources: []string{instanceId}, @@ -165,7 +200,70 @@ func TestWriteLogsToCloudWatch(t *testing.T) { end := time.Now() // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, instanceId, instanceId, &end, testCase.expectedEntity) + ValidateEntity(t, cwlClient, instanceId, instanceId, &end, testCase.expectedEntity, false) + + f.Close() + os.Remove(logFilePath + "-" + id.String()) + }) + } +} + +// TestCrossAccount writes N number of logs, and then validates that the +// log events being sent to the other account are not associated with entity +func TestCrossAccount(t *testing.T) { + // this uses the {instance_id} placeholder in the agent configuration, + // so we need to determine the host's instance ID for validation + instanceId := awsservice.GetInstanceId() + log.Printf("Found instance id %s", instanceId) + defer DeleteLogGroupAndStream(crossAccountLogClient, instanceId, instanceId) + + testCases := map[string]struct { + agentConfigPath string + iterations int + setupFunction func() error + entityFieldsShouldMiss bool + expectedEntity expectedEntity + }{ + "Compass/RoleArnCrossAccount": { + agentConfigPath: filepath.Join("resources", "compass_role_arn_check.json"), + entityFieldsShouldMiss: true, + setupFunction: SetupRoleArnCredential, + iterations: 1000, + }, + "Compass/ProfileCrossAccount": { + agentConfigPath: filepath.Join("resources", "compass_default_log.json"), + entityFieldsShouldMiss: true, + setupFunction: SetupProfileCredential, + iterations: 1000, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + err := testCase.setupFunction() + if err != nil { + t.Fatalf("Error setting up cross-account credential: %v", 
err) + } + id := uuid.New() + f, err := os.Create(logFilePath + "-" + id.String()) + if err != nil { + t.Fatalf("Error occurred creating log file for writing: %v", err) + } + common.DeleteFile(common.AgentLogFile) + common.TouchFile(common.AgentLogFile) + + common.CopyFile(testCase.agentConfigPath, configOutputPath) + + common.StartAgent(configOutputPath, true, false) + + // ensure that there is enough time from the "start" time and the first log line, + // so we don't miss it in the GetLogEvents call + writeLogLines(t, f, testCase.iterations) + time.Sleep(sleepForFlush) + common.StopAgent() + end := time.Now() + + // check CWL to ensure we got the expected entities in the log group + ValidateEntity(t, crossAccountLogClient, instanceId, instanceId, &end, testCase.expectedEntity, testCase.entityFieldsShouldMiss) f.Close() os.Remove(logFilePath + "-" + id.String()) @@ -173,6 +271,61 @@ func TestWriteLogsToCloudWatch(t *testing.T) { } } +func SetupRoleArnCredential() error { + err := ResetProfile() + // Truncate the common-config so we don't use the profile credential + if err != nil { + return fmt.Errorf("error truncating file: %s", err) + } + log.Println("common-config has been emptied successfully") + + jsonPath := filepath.Join("resources", "compass_role_arn_check.json") + // Read the JSON file + fileContent, err := os.ReadFile(jsonPath) + if err != nil { + return fmt.Errorf("error reading file: %s", err) + } + // Convert the file content to a string + jsonString := string(fileContent) + + // Replace the placeholder with the actual role ARN + updatedJsonString := strings.ReplaceAll(jsonString, "{integ-test-role-arn}", testAccountRoleArn) + + // Write the updated JSON string back to the file + err = os.WriteFile(jsonPath, []byte(updatedJsonString), 0644) + if err != nil { + return fmt.Errorf("error writing file: %s", err) + } + + log.Println("Successfully updated the role ARN in the JSON file") + return nil +} + +func SetupProfileCredential() error { + err := 
common.RunCommands(profileSetupCommand(testAccountRoleArn)) + return err +} + +func ResetProfile() error { + err := common.RunCommands(profileResetCommand()) + // Truncate the common-config so we don't use the profile credential + return err +} + +func profileSetupCommand(roleArn string) []string { + return []string{ + "mkdir -p " + credsDir, + "printf '[default]\naws_access_key_id=%s\naws_secret_access_key=%s\naws_session_token=%s' $(aws sts assume-role --role-arn " + roleArn + " --role-session-name test --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]' --output text) | tee " + credsDir + "/credentials>/dev/null", + "printf '[credentials]\n shared_credential_profile = \"default\"\n shared_credential_file = \"" + credsDir + "/credentials\"' | sudo tee /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml>/dev/null", + } +} + +func profileResetCommand() []string { + return []string{ + "sudo truncate -s 0 /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml", + } +} + func writeLogLines(t *testing.T, f *os.File, iterations int) { log.Printf("Writing %d lines to %s", iterations*len(logLineIds), f.Name()) @@ -192,10 +345,10 @@ func writeLogLines(t *testing.T, f *os.File, iterations int) { // ValidateLogs queries a given LogGroup/LogStream combination given the start and end times, and executes an // arbitrary validator function on the found logs. 
-func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity) { +func ValidateEntity(t *testing.T, logClient *cloudwatchlogs.Client, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity, entityFieldsShouldMiss bool) { log.Printf("Checking log group/stream: %s/%s", logGroup, logStream) - logGroupInfo, err := getLogGroup() + logGroupInfo, err := getLogGroup(logClient) for _, lg := range logGroupInfo { if *lg.LogGroupName == logGroup { log.Println("Log group " + *lg.LogGroupName + " exists") @@ -205,10 +358,10 @@ func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, ex assert.NoError(t, err) begin := end.Add(-sleepForFlush * 2) log.Printf("Start time is " + begin.String() + " and end time is " + end.String()) - queryId, err := getLogQueryId(logGroup, &begin, end) + queryId, err := getLogQueryId(logClient, logGroup, &begin, end) assert.NoError(t, err) log.Printf("queryId is " + *queryId) - result, err := getQueryResult(queryId) + result, err := getQueryResult(logClient, queryId) assert.NoError(t, err) if !assert.NotZero(t, len(result)) { return @@ -241,16 +394,20 @@ func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, ex } fmt.Printf("%s: %s\n", aws.ToString(field.Field), aws.ToString(field.Value)) } - allEntityFieldsFound := true + entityFieldFoundCount := 0 for _, value := range requiredEntityFields { - if !value { - allEntityFieldsFound = false + if value { + entityFieldFoundCount += 1 } } - assert.True(t, allEntityFieldsFound) + if entityFieldsShouldMiss { + assert.Equal(t, 0, entityFieldFoundCount) + } else { + assert.Equal(t, 5, entityFieldFoundCount) + } } -func getLogQueryId(logGroup string, since, until *time.Time) (*string, error) { +func getLogQueryId(logClient *cloudwatchlogs.Client, logGroup string, since, until *time.Time) (*string, error) { var queryId *string params := &cloudwatchlogs.StartQueryInput{ QueryString: aws.String("fields 
@message, @entity.KeyAttributes.Type, @entity.KeyAttributes.Name, @entity.KeyAttributes.Environment, @entity.Attributes.PlatformType, @entity.Attributes.EC2.InstanceId"), @@ -265,7 +422,7 @@ func getLogQueryId(logGroup string, since, until *time.Time) (*string, error) { attempts := 0 for { - output, err := cwlClient.StartQuery(context.Background(), params) + output, err := logClient.StartQuery(context.Background(), params) attempts += 1 if err != nil { @@ -283,7 +440,7 @@ func getLogQueryId(logGroup string, since, until *time.Time) (*string, error) { } } -func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { +func getQueryResult(logClient *cloudwatchlogs.Client, queryId *string) ([][]cwlTypes.ResultField, error) { attempts := 0 var results [][]cwlTypes.ResultField params := &cloudwatchlogs.GetQueryResultsInput{ @@ -293,7 +450,7 @@ func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { if attempts > awsservice.StandardRetries { return results, errors.New("exceeded retry count") } - result, err := cwlClient.GetQueryResults(context.Background(), params) + result, err := logClient.GetQueryResults(context.Background(), params) log.Printf("GetQueryResult status is: %v", result.Status) attempts += 1 if result.Status != cwlTypes.QueryStatusComplete { @@ -317,12 +474,12 @@ func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { } } -func getLogGroup() ([]cwlTypes.LogGroup, error) { +func getLogGroup(logClient *cloudwatchlogs.Client) ([]cwlTypes.LogGroup, error) { attempts := 0 var logGroups []cwlTypes.LogGroup params := &cloudwatchlogs.DescribeLogGroupsInput{} for { - output, err := cwlClient.DescribeLogGroups(context.Background(), params) + output, err := logClient.DescribeLogGroups(context.Background(), params) attempts += 1 @@ -340,3 +497,31 @@ func getLogGroup() ([]cwlTypes.LogGroup, error) { return logGroups, err } } + +// DeleteLogGroupAndStream cleans up a log group and stream by name. 
This gracefully handles +// ResourceNotFoundException errors from calling the APIs +func DeleteLogGroupAndStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { + DeleteLogStream(logClient, logGroupName, logStreamName) + DeleteLogGroup(logClient, logGroupName) +} + +// DeleteLogStream cleans up log stream by name +func DeleteLogStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { + _, err := logClient.DeleteLogStream(context.TODO(), &cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String(logGroupName), + LogStreamName: aws.String(logStreamName), + }) + if err != nil && !errors.As(err, &rnf) { + log.Printf("Error occurred while deleting log stream %s: %v", logStreamName, err) + } +} + +// DeleteLogGroup cleans up log group by name +func DeleteLogGroup(logClient *cloudwatchlogs.Client, logGroupName string) { + _, err := logClient.DeleteLogGroup(context.TODO(), &cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String(logGroupName), + }) + if err != nil && !errors.As(err, &rnf) { + log.Printf("Error occurred while deleting log group %s: %v", logGroupName, err) + } +} diff --git a/test/compass/resources/compass_role_arn_check.json b/test/compass/resources/compass_role_arn_check.json new file mode 100644 index 0000000000..90b1eaaabb --- /dev/null +++ b/test/compass/resources/compass_role_arn_check.json @@ -0,0 +1,24 @@ +{ + "agent": { + "run_as_user": "root", + "debug": true, + "credentials": { + "role_arn": "{integ-test-role-arn}" + } + }, + "logs": { + "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/tmp/cwagent_log_test.log*", + "log_group_name": "{instance_id}", + "log_stream_name": "{instance_id}", + "timezone": "UTC" + } + ] + } + } + } +} From ae2b75e5667908e027e26a84decf4d6191571236 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Thu, 25 Jul 2024 11:27:02 -0400 
Subject: [PATCH 43/55] Refactor compass code to get the region from translator (#752) --- extension/entitystore/config.go | 1 + extension/entitystore/ec2Info.go | 12 +++++----- extension/entitystore/ec2Info_test.go | 6 ++--- extension/entitystore/extension.go | 12 ++-------- extension/entitystore/extension_test.go | 24 ------------------- extension/entitystore/serviceprovider.go | 11 ++++----- .../sampleConfig/advanced_config_darwin.yaml | 1 + .../sampleConfig/advanced_config_linux.yaml | 1 + .../sampleConfig/advanced_config_windows.yaml | 1 + .../appsignals_and_eks_config.yaml | 1 + .../appsignals_and_k8s_config.yaml | 1 + .../appsignals_fallback_and_eks_config.yaml | 1 + .../appsignals_over_fallback_config.yaml | 1 + .../sampleConfig/base_appsignals_config.yaml | 1 + .../base_appsignals_fallback_config.yaml | 1 + .../base_container_insights_config.yaml | 1 + .../sampleConfig/basic_config_linux.yaml | 1 + .../sampleConfig/basic_config_windows.yaml | 1 + .../sampleConfig/collectd_config_linux.yaml | 1 + .../sampleConfig/compass_linux_config.yaml | 1 + .../sampleConfig/complete_darwin_config.yaml | 1 + .../sampleConfig/complete_linux_config.yaml | 1 + .../sampleConfig/complete_windows_config.yaml | 1 + .../sampleConfig/config_with_env.yaml | 1 + .../sampleConfig/delta_config_linux.yaml | 1 + .../sampleConfig/delta_net_config_linux.yaml | 1 + .../sampleConfig/drop_origin_linux.yaml | 1 + .../emf_and_kubernetes_config.yaml | 1 + .../emf_and_kubernetes_with_gpu_config.yaml | 1 + .../ignore_append_dimensions.yaml | 1 + .../sampleConfig/invalid_input_linux.yaml | 1 + .../kubernetes_on_prem_config.yaml | 1 + .../sampleConfig/log_ecs_metric_only.yaml | 1 + .../tocwconfig/sampleConfig/log_filter.yaml | 1 + .../sampleConfig/log_only_config_windows.yaml | 1 + .../logs_and_kubernetes_config.yaml | 1 + .../sampleConfig/no_skip_log_timestamp.yaml | 1 + .../no_skip_log_timestamp_windows.yaml | 1 + .../sampleConfig/prometheus_config_linux.yaml | 1 + 
.../prometheus_config_windows.yaml | 1 + .../sampleConfig/skip_log_timestamp.yaml | 1 + .../skip_log_timestamp_default.yaml | 1 + .../skip_log_timestamp_default_windows.yaml | 1 + .../skip_log_timestamp_windows.yaml | 1 + .../sampleConfig/standard_config_linux.yaml | 1 + ...ndard_config_linux_with_common_config.yaml | 1 + .../sampleConfig/standard_config_windows.yaml | 1 + ...ard_config_windows_with_common_config.yaml | 1 + .../sampleConfig/statsd_config_linux.yaml | 1 + .../sampleConfig/statsd_config_windows.yaml | 1 + .../sampleConfig/trace_config_linux.yaml | 1 + .../sampleConfig/trace_config_windows.yaml | 1 + .../windows_eventlog_only_config.yaml | 1 + .../otel/extension/entitystore/translator.go | 1 + .../extension/entitystore/translator_test.go | 3 +++ 55 files changed, 66 insertions(+), 51 deletions(-) diff --git a/extension/entitystore/config.go b/extension/entitystore/config.go index f30d8cfbd1..c866540bb0 100644 --- a/extension/entitystore/config.go +++ b/extension/entitystore/config.go @@ -9,6 +9,7 @@ import ( type Config struct { Mode string `mapstructure:"mode"` + Region string `mapstructure:"region"` Profile string `mapstructure:"profile,omitempty"` RoleARN string `mapstructure:"role_arn,omitempty"` Filename string `mapstructure:"shared_credential_file,omitempty"` diff --git a/extension/entitystore/ec2Info.go b/extension/entitystore/ec2Info.go index 1b2ec011b9..d8fb9941af 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -45,7 +45,7 @@ type ec2Info struct { func (ei *ec2Info) initEc2Info() { log.Println("I! 
ec2Info: Initializing ec2Info") - if err := ei.setInstanceIdAndRegion(); err != nil { + if err := ei.setInstanceId(); err != nil { return } ei.ec2API = ei.ec2Provider(ei.Region, ei.ec2Credential) @@ -56,11 +56,11 @@ func (ei *ec2Info) initEc2Info() { ei.ignoreInvalidFields() } -func (ei *ec2Info) setInstanceIdAndRegion() error { +func (ei *ec2Info) setInstanceId() error { for { metadataDoc, err := ei.metadataProvider.Get(context.Background()) if err != nil { - log.Printf("E! ec2Info: Failed to get Instance Id and region through metadata provider: %v", err) + log.Printf("E! ec2Info: Failed to get Instance Id through metadata provider: %v", err) wait := time.NewTimer(1 * time.Minute) select { case <-ei.done: @@ -70,9 +70,8 @@ func (ei *ec2Info) setInstanceIdAndRegion() error { continue } } - log.Printf("D! ec2Info: Successfully retrieved Instance Id %s, Region %s", ei.InstanceID, ei.Region) + log.Printf("D! ec2Info: Successfully retrieved Instance Id %s", ei.InstanceID) ei.InstanceID = metadataDoc.InstanceID - ei.Region = metadataDoc.Region return nil } } @@ -171,12 +170,13 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error return nil } -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) *ec2Info { +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string) *ec2Info { return &ec2Info{ metadataProvider: metadataProvider, ec2Provider: providerType, ec2Credential: ec2Credential, done: done, + Region: region, } } diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index ef09ccb4bf..ddffefd14d 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -82,7 +82,6 @@ func TestSetInstanceIdAndRegion(t *testing.T) { wantErr: false, want: 
ec2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, - Region: mockedInstanceIdentityDoc.Region, }, }, } @@ -91,11 +90,10 @@ func TestSetInstanceIdAndRegion(t *testing.T) { ei := &ec2Info{ metadataProvider: tt.args.metadataProvider, } - if err := ei.setInstanceIdAndRegion(); (err != nil) != tt.wantErr { - t.Errorf("setInstanceIdAndRegion() error = %v, wantErr %v", err, tt.wantErr) + if err := ei.setInstanceId(); (err != nil) != tt.wantErr { + t.Errorf("setInstanceId() error = %v, wantErr %v", err, tt.wantErr) } assert.Equal(t, tt.want.InstanceID, ei.InstanceID) - assert.Equal(t, tt.want.Region, ei.Region) }) } } diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 9ec9cf4ed4..d725edae9a 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -91,10 +91,10 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { } switch e.mode { case config.ModeEC2: - e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) + e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.config.Region) go e.ec2Info.initEc2Info() } - e.serviceprovider = newServiceProvider(e.mode, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) + e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) go e.serviceprovider.startServiceProvider() return nil } @@ -230,14 +230,6 @@ func getEC2Provider(region string, ec2CredentialConfig *configaws.CredentialConf }) } -func getRegion(metadataProvider ec2metadataprovider.MetadataProvider) (string, error) { - instanceDocument, err := metadataProvider.Get(context.Background()) - if err != nil { - return "", err - } - return instanceDocument.Region, nil -} - func addNonEmptyToMap(m map[string]*string, key, value string) { if value != "" { m[key] = aws.String(value) diff --git 
a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 7b4a84256f..a368bce519 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -142,30 +142,6 @@ func TestEntityStore_Mode(t *testing.T) { } } -func Test_getRegion(t *testing.T) { - tests := []struct { - name string - metadataProvider ec2metadataprovider.MetadataProvider - want string - }{ - { - name: "HappyPath", - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - Region: "us-west-2"}, - }, - want: "us-west-2", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getRegion(tt.metadataProvider) - assert.NoError(t, err) - assert.Equalf(t, tt.want, got, "getRegion(%v)", tt.metadataProvider) - }) - } -} - func TestEntityStore_createAttributeMaps(t *testing.T) { type fields struct { ec2Info ec2Info diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index 254a241607..ee962f21a8 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -69,6 +69,7 @@ type serviceprovider struct { ec2Credential *configaws.CredentialConfig iamRole string ec2TagServiceName string + region string done chan struct{} // logFiles stores the service attributes that were configured for log files in CloudWatch Agent configuration. 
@@ -265,11 +266,7 @@ func (s *serviceprovider) getEC2Client() error { if s.ec2API != nil { return nil } - region, err := getRegion(s.metadataProvider) - if err != nil { - return fmt.Errorf("failed to get EC2 client: %s", err) - } - s.ec2API = s.ec2Provider(region, s.ec2Credential) + s.ec2API = s.ec2Provider(s.region, s.ec2Credential) return nil } @@ -296,10 +293,10 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func newServiceProvider(mode string, ec2Info *ec2Info, metadataProvider ec2metadataprovider.MetadataProvider, - providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { +func newServiceProvider(mode string, region string, ec2Info *ec2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { return &serviceprovider{ mode: mode, + region: region, ec2Info: ec2Info, metadataProvider: metadataProvider, ec2Provider: providerType, diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 7f1c36479a..b8b2f08118 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index 3ea95f6fc6..b8c01b0d28 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: cumulativetodelta/hostDeltaMetrics: 
exclude: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 639875086e..eace7e9d9b 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index ebb4a066e2..fdd8cfaf5e 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -287,6 +287,7 @@ extensions: role_arn: "" entitystore: mode: ec2 + region: us-east-1 processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index d4e8665f65..0418a82893 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -287,6 +287,7 @@ extensions: role_arn: "" entitystore: mode: ec2 + region: us-east-1 processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 144f00571d..9728e3a838 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -287,6 +287,7 @@ extensions: role_arn: "" entitystore: mode: ec2 + region: us-east-1 processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml 
b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 00934619ab..a0d628b79a 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -287,6 +287,7 @@ extensions: role_arn: "" entitystore: mode: ec2 + region: us-east-1 processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index 05e6e0f058..6b0a7ecbfe 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -155,6 +155,7 @@ extensions: entitystore: mode: onPremise profile: AmazonCloudWatchAgent + region: us-east-1 shared_credential_file: fake-path processors: awsentity: {} diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 97738da8a1..ce424e59dd 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -155,6 +155,7 @@ extensions: entitystore: mode: onPremise profile: AmazonCloudWatchAgent + region: us-east-1 shared_credential_file: fake-path processors: awsapplicationsignals: diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index c0946df460..fbb9dcfcdd 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -150,6 +150,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: batch/containerinsights: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml 
b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 244af9bf2d..020287c779 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index bec9fcb66d..9e7e5a8ca6 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index f50ea024d7..102cabaf4c 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 receivers: telegraf_socket_listener: collection_interval: 1m0s diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 04c49be2c6..be53979284 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: ec2 + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index b236428ee4..4be7d6d682 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ 
b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -93,6 +93,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index 78883abadb..7df9e48852 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -96,6 +96,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 791f6f3186..23b8e306ac 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -93,6 +93,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: batch/emf_logs: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index 5bb686ff14..c776e99d76 100644 --- a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -41,6 +41,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: ${ENV_REGION} processors: batch/emf_logs: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 850a3af086..fc9cde4d17 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: 
cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index e375628c32..11f2845732 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index b9cec6a88a..b7909d8e6d 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -24,6 +24,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index b5e429b5e4..a29a555920 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -395,6 +395,7 @@ extensions: entitystore: mode: onPremise profile: default + region: us-east-1 shared_credential_file: /root/.aws/credentials processors: batch/containerinsights: diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml index 6dcce4c98a..9c190ce61b 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml @@ -660,6 +660,7 @@ extensions: entitystore: mode: onPremise profile: default + region: us-east-1 shared_credential_file: /root/.aws/credentials processors: batch/containerinsights: diff 
--git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index 086a3d45e5..df878013c5 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: ec2tagger: imds_retries: 1 diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index 244af9bf2d..020287c779 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index 30d961e8a9..2877911ab4 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -362,6 +362,7 @@ extensions: entitystore: mode: onPremise profile: AmazonCloudWatchAgent + region: us-east-1 shared_credential_file: fake-path processors: batch/containerinsights: diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 1c41bc2294..3dc25c812f 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -99,6 +99,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: batch/containerinsights: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/log_filter.yaml b/translator/tocwconfig/sampleConfig/log_filter.yaml index 
53768311a0..0b694fca94 100644 --- a/translator/tocwconfig/sampleConfig/log_filter.yaml +++ b/translator/tocwconfig/sampleConfig/log_filter.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-east-1 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml index f6d9c2ba5a..a01b2785ed 100644 --- a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 09f9186f1c..8db3c89fc1 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -390,6 +390,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: batch/containerinsights: diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml index 53768311a0..e3327e1d00 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml index f6d9c2ba5a..a01b2785ed 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 
receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index cfd3398d31..ce4ad17041 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -79,6 +79,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: batch/prometheus: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index 9d5f047c00..2a47f34ae3 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -61,6 +61,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-east-1 processors: batch/prometheus: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml index b4ed735ac8..df1b94808d 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml index 53768311a0..e3327e1d00 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml 
index f6d9c2ba5a..a01b2785ed 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml index 9c812a85fa..e3dd15b6ba 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index a9a55aa6f3..a5246678ee 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 95bf544afd..e101f3ccdb 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -22,6 +22,7 @@ extensions: entitystore: mode: ec2 profile: AmazonCloudWatchAgent + region: us-west-2 shared_credential_file: fake-path processors: cumulativetodelta/hostDeltaMetrics: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 213c8a1066..1ba46d361c 100644 --- 
a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 processors: ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index f310dce431..f5d87c1b88 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -22,6 +22,7 @@ extensions: entitystore: mode: ec2 profile: AmazonCloudWatchAgent + region: us-west-2 shared_credential_file: fake-path processors: ec2tagger: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 7ec240c0d6..fe837f5718 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 receivers: telegraf_statsd: collection_interval: 10s diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index 224508896d..57c2a820bd 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -19,6 +19,7 @@ extensions: region_type: ACJ entitystore: mode: ec2 + region: us-west-2 receivers: telegraf_statsd: collection_interval: 10s diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index b284afb1da..e5d3aa409d 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ 
b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -32,6 +32,7 @@ extensions: entitystore: mode: ec2 profile: default + region: us-west-2 shared_credential_file: /root/.aws/credentials processors: batch/xray: diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index 56813bb925..ddb9a010b8 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -32,6 +32,7 @@ extensions: entitystore: mode: ec2 profile: default + region: us-west-2 shared_credential_file: /root/.aws/credentials processors: batch/xray: diff --git a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml index cab0687041..78c680ddcf 100644 --- a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml +++ b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml @@ -3,6 +3,7 @@ exporters: extensions: entitystore: mode: "ec2" + region: us-west-2 receivers: nop: {} service: diff --git a/translator/translate/otel/extension/entitystore/translator.go b/translator/translate/otel/extension/entitystore/translator.go index 7558db78cb..ab80a851fa 100644 --- a/translator/translate/otel/extension/entitystore/translator.go +++ b/translator/translate/otel/extension/entitystore/translator.go @@ -35,6 +35,7 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*entitystore.Config) cfg.Mode = context.CurrentContext().Mode() + cfg.Region = agent.Global_Config.Region credentials := confmap.NewFromStringMap(agent.Global_Config.Credentials) _ = credentials.Unmarshal(cfg) diff --git a/translator/translate/otel/extension/entitystore/translator_test.go b/translator/translate/otel/extension/entitystore/translator_test.go index 
c07d809653..74a11f8138 100644 --- a/translator/translate/otel/extension/entitystore/translator_test.go +++ b/translator/translate/otel/extension/entitystore/translator_test.go @@ -18,6 +18,7 @@ import ( func TestTranslate(t *testing.T) { context.CurrentContext().SetMode(config.ModeEC2) translateagent.Global_Config.Credentials = make(map[string]interface{}) + translateagent.Global_Config.Region = "us-east-1" testCases := map[string]struct { input map[string]interface{} file_exists bool @@ -29,6 +30,7 @@ func TestTranslate(t *testing.T) { profile_exists: true, want: &entitystore.Config{ Mode: config.ModeEC2, + Region: "us-east-1", Profile: "test_profile", }, }, @@ -37,6 +39,7 @@ func TestTranslate(t *testing.T) { file_exists: true, want: &entitystore.Config{ Mode: config.ModeEC2, + Region: "us-east-1", Filename: "test_file", }, }, From 184e0b75220a0ae19e160f8f04f26341b3ccaeef Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 31 Jul 2024 14:27:44 -0400 Subject: [PATCH 44/55] Add EntityRejected count health metrics to agent client handler (#754) --- .../agenthealth/handler/stats/agent/agent.go | 4 ++++ .../handler/stats/client/client.go | 23 +++++++++++++++++++ .../handler/stats/client/client_test.go | 21 ++++++++++++++++- 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/extension/agenthealth/handler/stats/agent/agent.go b/extension/agenthealth/handler/stats/agent/agent.go index 1c3b0dc348..83237d54e6 100644 --- a/extension/agenthealth/handler/stats/agent/agent.go +++ b/extension/agenthealth/handler/stats/agent/agent.go @@ -29,6 +29,7 @@ type Stats struct { RunningInContainer *int `json:"ric,omitempty"` RegionType *string `json:"rt,omitempty"` Mode *string `json:"m,omitempty"` + EntityRejected *int `json:"ent,omitempty"` } // Merge the other Stats into the current. 
If the field is not nil, @@ -76,6 +77,9 @@ func (s *Stats) Merge(other Stats) { if other.Mode != nil { s.Mode = other.Mode } + if other.EntityRejected != nil { + s.EntityRejected = other.EntityRejected + } } func (s *Stats) Marshal() (string, error) { diff --git a/extension/agenthealth/handler/stats/client/client.go b/extension/agenthealth/handler/stats/client/client.go index 188ef6b207..9e6910b1c8 100644 --- a/extension/agenthealth/handler/stats/client/client.go +++ b/extension/agenthealth/handler/stats/client/client.go @@ -4,6 +4,7 @@ package client import ( + "bytes" "context" "io" "net/http" @@ -23,6 +24,10 @@ const ( cacheSize = 1000 ) +var ( + rejectedEntityInfo = []byte("\"rejectedEntityInfo\"") +) + type Stats interface { awsmiddleware.RequestHandler awsmiddleware.ResponseHandler @@ -108,6 +113,9 @@ func (csh *clientStatsHandler) HandleResponse(ctx context.Context, r *http.Respo } latency := time.Since(recorder.start) stats.LatencyMillis = aws.Int64(latency.Milliseconds()) + if rejectedEntityInfoExists(r) { + stats.EntityRejected = aws.Int(1) + } csh.statsByOperation.Store(operation, stats) } @@ -122,3 +130,18 @@ func (csh *clientStatsHandler) Stats(operation string) agent.Stats { } return stats } + +// rejectedEntityInfoExists checks if the response body +// contains element rejectedEntityInfo +func rejectedEntityInfoExists(r *http.Response) bool { + // Example body for rejectedEntityInfo would be: + // {"rejectedEntityInfo":{"errorType":"InvalidAttributes"}} + if r == nil || r.Body == nil { + return false + } + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + return false + } + return bytes.Contains(bodyBytes, rejectedEntityInfo) +} diff --git a/extension/agenthealth/handler/stats/client/client_test.go b/extension/agenthealth/handler/stats/client/client_test.go index 35ac023e0e..3641ada9aa 100644 --- a/extension/agenthealth/handler/stats/client/client_test.go +++ b/extension/agenthealth/handler/stats/client/client_test.go @@ -6,6 +6,7 @@ package 
client import ( "bytes" "context" + "io" "net/http" "testing" "time" @@ -37,11 +38,16 @@ func TestHandle(t *testing.T) { assert.Nil(t, got.PayloadBytes) assert.Nil(t, got.StatusCode) time.Sleep(time.Millisecond) - handler.HandleResponse(ctx, &http.Response{StatusCode: http.StatusOK}) + handler.HandleResponse(ctx, &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewBufferString(`{"rejectedEntityInfo":{"errorType":"InvalidAttributes"}}`)), + }) got = handler.Stats(operation) assert.NotNil(t, got.LatencyMillis) assert.NotNil(t, got.PayloadBytes) assert.NotNil(t, got.StatusCode) + assert.NotNil(t, got.EntityRejected) + assert.Equal(t, 1, *got.EntityRejected) assert.Equal(t, http.StatusOK, *got.StatusCode) assert.Equal(t, 20, *got.PayloadBytes) assert.GreaterOrEqual(t, *got.LatencyMillis, int64(1)) @@ -65,3 +71,16 @@ func TestHandle(t *testing.T) { assert.NotNil(t, got.PayloadBytes) assert.Equal(t, 29, *got.PayloadBytes) } + +func BenchmarkRejectedEntityInfoExists(b *testing.B) { + body := `{"rejectedEntityInfo":{"errorType":"InvalidAttributes"}}` + resp := &http.Response{ + Body: io.NopCloser(bytes.NewBufferString(body)), + } + + for n := 0; n < b.N; n++ { + rejectedEntityInfoExists(resp) + // Reset the body for the next iteration + resp.Body = io.NopCloser(bytes.NewBufferString(body)) + } +} From ed7768a6fb4c86a8724a8581efd2e0e4fe88f15a Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 31 Jul 2024 17:15:44 -0400 Subject: [PATCH 45/55] Revert "Remove service name source from entity KeyAttributes (#745)" (#755) --- plugins/outputs/cloudwatchlogs/pusher.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index ed84c7a407..201c9ffdb9 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -14,7 +14,6 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatchlogs" 
"github.com/influxdata/telegraf" - "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/profiler" ) @@ -226,10 +225,6 @@ func (p *pusher) send() { var entity *cloudwatchlogs.Entity if p.logSrc != nil { entity = p.logSrc.Entity() - // The following logics should be removed after Compass GA1 - if entity != nil && entity.Attributes != nil { - delete(entity.Attributes, entitystore.ServiceNameSourceKey) - } } input := &cloudwatchlogs.PutLogEventsInput{ From e05407a647180de70970f90730e1943b6f00ab36 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 6 Aug 2024 10:08:08 -0400 Subject: [PATCH 46/55] Close HTTP response body after done with entity health metric parsing (#761) --- extension/agenthealth/handler/stats/client/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/extension/agenthealth/handler/stats/client/client.go b/extension/agenthealth/handler/stats/client/client.go index 9e6910b1c8..39c4c667d7 100644 --- a/extension/agenthealth/handler/stats/client/client.go +++ b/extension/agenthealth/handler/stats/client/client.go @@ -139,6 +139,7 @@ func rejectedEntityInfoExists(r *http.Response) bool { if r == nil || r.Body == nil { return false } + defer r.Body.Close() bodyBytes, err := io.ReadAll(r.Body) if err != nil { return false From 63aa71d0ada3683659d311723a3002310c8e6f8d Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 16 Aug 2024 14:58:19 -0400 Subject: [PATCH 47/55] Refactor ec2Info loggers and add tests to ensure no resource info leaks (#778) --- extension/entitystore/ec2Info.go | 36 +++++------ extension/entitystore/ec2Info_test.go | 80 ++++++++++++++++++++++++- extension/entitystore/extension.go | 2 +- extension/entitystore/extension_test.go | 4 ++ 4 files changed, 102 insertions(+), 20 deletions(-) diff --git a/extension/entitystore/ec2Info.go 
b/extension/entitystore/ec2Info.go index d8fb9941af..7619d898b4 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -6,13 +6,13 @@ package entitystore import ( "context" "errors" - "log" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -40,11 +40,12 @@ type ec2Info struct { ec2API ec2iface.EC2API ec2Provider ec2ProviderType ec2Credential *configaws.CredentialConfig + logger *zap.Logger done chan struct{} } func (ei *ec2Info) initEc2Info() { - log.Println("I! ec2Info: Initializing ec2Info") + ei.logger.Debug("Initializing ec2Info") if err := ei.setInstanceId(); err != nil { return } @@ -52,7 +53,7 @@ func (ei *ec2Info) initEc2Info() { if err := ei.setAutoScalingGroup(); err != nil { return } - log.Printf("D! ec2Info: Finished initializing ec2Info: InstanceId %s, AutoScalingGroup %s", ei.InstanceID, ei.AutoScalingGroup) + ei.logger.Debug("Finished initializing ec2Info") ei.ignoreInvalidFields() } @@ -60,17 +61,17 @@ func (ei *ec2Info) setInstanceId() error { for { metadataDoc, err := ei.metadataProvider.Get(context.Background()) if err != nil { - log.Printf("E! ec2Info: Failed to get Instance Id through metadata provider: %v", err) + ei.logger.Warn("Failed to get Instance Id through metadata provider", zap.Error(err)) wait := time.NewTimer(1 * time.Minute) select { case <-ei.done: wait.Stop() - return errors.New("ec2Info: shutdownC received") + return errors.New("shutdown signal received") case <-wait.C: continue } } - log.Printf("D! 
ec2Info: Successfully retrieved Instance Id %s", ei.InstanceID) + ei.logger.Debug("Successfully retrieved Instance ID") ei.InstanceID = metadataDoc.InstanceID return nil } @@ -90,18 +91,18 @@ func (ei *ec2Info) setAutoScalingGroup() error { select { case <-ei.done: wait.Stop() - return errors.New("ec2Info: shutdownC received") + return errors.New("shutdown signal received") case <-wait.C: } if retry > 0 { - log.Printf("D! ec2Info: initial retrieval of tags and volumes with retry: %d", retry) + ei.logger.Debug("Initial retrieval of tags and volumes", zap.Int("retry", retry)) } if err := ei.retrieveAsgName(ei.ec2API); err != nil { - log.Printf("E! ec2Info: Unable to describe ec2 tags for retry %d with error %v", retry, err) + ei.logger.Warn("Unable to describe ec2 tags", zap.Int("retry", retry), zap.Error(err)) } else { - log.Println("I! ec2Info: Retrieval of tags succeeded") + ei.logger.Debug("Retrieval of auto-scaling group tags succeeded") return nil } @@ -117,14 +118,14 @@ as we need to distinguish the tags not being fetchable at all, from the ASG tag func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { tags, err := ei.metadataProvider.InstanceTags(context.Background()) if err != nil { - log.Printf("E! ec2Info: Failed to get tags through metadata provider: %v", err.Error()) + ei.logger.Debug("Failed to get tags through metadata provider", zap.Error(err)) return ei.retrieveAsgNameWithDescribeTags(ec2API) } else if strings.Contains(tags, ec2tagger.Ec2InstanceTagKeyASG) { asg, err := ei.metadataProvider.InstanceTagValue(context.Background(), ec2tagger.Ec2InstanceTagKeyASG) if err != nil { - log.Printf("E! ec2Info: Failed to get AutoScalingGroup through metadata provider: %v", err.Error()) + ei.logger.Error("Failed to get AutoScalingGroup through metadata provider", zap.Error(err)) } else { - log.Printf("D! 
ec2Info: AutoScalingGroup retrieved through IMDS: %s", asg) + ei.logger.Debug("AutoScalingGroup retrieved through IMDS") ei.AutoScalingGroup = asg } } @@ -152,7 +153,7 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error for { result, err := ec2API.DescribeTags(input) if err != nil { - log.Println("E! ec2Info: Unable to retrieve EC2 AutoScalingGroup. This feature must only be used on an EC2 instance.") + ei.logger.Error("Unable to retrieve EC2 AutoScalingGroup. This feature must only be used on an EC2 instance.") return err } for _, tag := range result.Tags { @@ -170,24 +171,25 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error return nil } -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string) *ec2Info { +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string, logger *zap.Logger) *ec2Info { return &ec2Info{ metadataProvider: metadataProvider, ec2Provider: providerType, ec2Credential: ec2Credential, done: done, Region: region, + logger: logger, } } func (ei *ec2Info) ignoreInvalidFields() { if idLength := len(ei.InstanceID); idLength > instanceIdSizeMax { - log.Printf("W! ec2Info: InstanceId length of %d exceeds %d characters and will be ignored", idLength, instanceIdSizeMax) + ei.logger.Warn("InstanceId length exceeds characters limit and will be ignored", zap.Int("length", idLength), zap.Int("character limit", instanceIdSizeMax)) ei.InstanceID = "" } if asgLength := len(ei.AutoScalingGroup); asgLength > autoScalingGroupSizeMax { - log.Printf("W! 
ec2Info: AutoScalingGroup length of %d exceeds %d characters and will be ignored", asgLength, autoScalingGroupSizeMax) + ei.logger.Warn("AutoScalingGroup length exceeds characters limit and will be ignored", zap.Int("length", asgLength), zap.Int("character limit", autoScalingGroupSizeMax)) ei.AutoScalingGroup = "" } } diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index ddffefd14d..f5ca24a084 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -4,14 +4,20 @@ package entitystore import ( + "bytes" + "log" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) @@ -64,6 +70,10 @@ func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsO return &allTags, nil } +func mockEC2Provider(region string, credential *configaws.CredentialConfig) ec2iface.EC2API { + return &mockEC2Client{withASG: true} +} + func TestSetInstanceIdAndRegion(t *testing.T) { type args struct { metadataProvider ec2metadataprovider.MetadataProvider @@ -86,9 +96,11 @@ func TestSetInstanceIdAndRegion(t *testing.T) { }, } for _, tt := range tests { + logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { ei := &ec2Info{ metadataProvider: tt.args.metadataProvider, + logger: logger, } if err := ei.setInstanceId(); (err != nil) != tt.wantErr { t.Errorf("setInstanceId() error = %v, wantErr %v", err, tt.wantErr) @@ -144,8 +156,9 @@ func TestRetrieveASGName(t *testing.T) { }, } for _, tt := range tests { + logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { - ei := &ec2Info{metadataProvider: tt.args.metadataProvider} + ei := 
&ec2Info{metadataProvider: tt.args.metadataProvider, logger: logger} if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } @@ -186,8 +199,9 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { }, } for _, tt := range tests { + logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { - ei := &ec2Info{} + ei := &ec2Info{logger: logger} if err := ei.retrieveAsgNameWithDescribeTags(tt.args.ec2Client); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } @@ -197,6 +211,7 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { } func TestIgnoreInvalidFields(t *testing.T) { + logger, _ := zap.NewDevelopment() type want struct { instanceId string autoScalingGroup string @@ -211,6 +226,7 @@ func TestIgnoreInvalidFields(t *testing.T) { args: &ec2Info{ InstanceID: "i-01d2417c27a396e44", AutoScalingGroup: "asg", + logger: logger, }, want: want{ instanceId: "i-01d2417c27a396e44", @@ -222,6 +238,7 @@ func TestIgnoreInvalidFields(t *testing.T) { args: &ec2Info{ InstanceID: strings.Repeat("a", 20), AutoScalingGroup: "asg", + logger: logger, }, want: want{ instanceId: "", @@ -233,6 +250,7 @@ func TestIgnoreInvalidFields(t *testing.T) { args: &ec2Info{ InstanceID: "i-01d2417c27a396e44", AutoScalingGroup: strings.Repeat("a", 256), + logger: logger, }, want: want{ instanceId: "i-01d2417c27a396e44", @@ -248,3 +266,61 @@ func TestIgnoreInvalidFields(t *testing.T) { }) } } + +func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { + type args struct { + metadataProvider ec2metadataprovider.MetadataProvider + } + tests := []struct { + name string + args args + want ec2Info + }{ + { + name: "AutoScalingGroupWithDescribeTags", + args: args{ + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, InstanceTagError: true}, + }, + want: ec2Info{ + InstanceID: 
mockedInstanceIdentityDoc.InstanceID, + }, + }, + { + name: "AutoScalingGroupWithInstanceTags", + args: args{ + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, + }, + want: ec2Info{ + InstanceID: mockedInstanceIdentityDoc.InstanceID, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a buffer to capture the logger output + var buf bytes.Buffer + writer := zapcore.AddSync(&buf) + + // Create a custom zapcore.Core that writes to the buffer + encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) + core := zapcore.NewCore(encoder, writer, zapcore.DebugLevel) + + logger := zap.New(core) + done := make(chan struct{}) + + ei := &ec2Info{ + metadataProvider: tt.args.metadataProvider, + ec2Provider: mockEC2Provider, + logger: logger, + done: done, + } + go ei.initEc2Info() + time.Sleep(3 * time.Second) + + logOutput := buf.String() + log.Println(logOutput) + assert.NotContains(t, logOutput, ei.InstanceID) + assert.NotContains(t, logOutput, ei.AutoScalingGroup) + }) + } +} diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index d725edae9a..c71dd84a0a 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -91,7 +91,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { } switch e.mode { case config.ModeEC2: - e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.config.Region) + e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.config.Region, e.logger) go e.ec2Info.initEc2Info() } e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index a368bce519..27fb9e7c05 100644 
--- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -55,6 +55,7 @@ type mockMetadataProvider struct { InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument Tags string TagValue string + InstanceTagError bool } func mockMetadataProviderWithAccountId(accountId string) *mockMetadataProvider { @@ -85,6 +86,9 @@ func (m *mockMetadataProvider) InstanceProfileIAMRole() (string, error) { } func (m *mockMetadataProvider) InstanceTags(ctx context.Context) (string, error) { + if m.InstanceTagError { + return "", errors.New("an error occurred for instance tag retrieval") + } return m.Tags, nil } From bfed712dee28ad6663c479c33c6f3871b5fcf220 Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Mon, 19 Aug 2024 13:35:12 -0400 Subject: [PATCH 48/55] Temporarily moves changes from sdkv1 into CW Agent (#779) --- extension/entitystore/extension.go | 2 +- extension/entitystore/extension_test.go | 2 +- logs/logs.go | 2 +- plugins/inputs/logfile/tailersrc.go | 2 +- .../wineventlog/wineventlog.go | 2 +- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 2 +- plugins/outputs/cloudwatchlogs/pusher.go | 2 +- plugins/outputs/cloudwatchlogs/pusher_test.go | 2 +- sdk/service/cloudwatchlogs/api.go | 21522 ++++++++++++++++ .../cloudwatchlogsiface/interface.go | 396 + sdk/service/cloudwatchlogs/doc.go | 57 + sdk/service/cloudwatchlogs/errors.go | 159 + sdk/service/cloudwatchlogs/integ_test.go | 67 + sdk/service/cloudwatchlogs/service.go | 112 + 14 files changed, 22321 insertions(+), 8 deletions(-) create mode 100644 sdk/service/cloudwatchlogs/api.go create mode 100644 sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go create mode 100644 sdk/service/cloudwatchlogs/doc.go create mode 100644 sdk/service/cloudwatchlogs/errors.go create mode 100644 sdk/service/cloudwatchlogs/integ_test.go create mode 100644 sdk/service/cloudwatchlogs/service.go diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 
c71dd84a0a..c18e395649 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/sts" @@ -20,6 +19,7 @@ import ( configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 27fb9e7c05..3469ee8979 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -13,13 +13,13 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) diff --git a/logs/logs.go b/logs/logs.go index 913377b610..a497883009 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -9,11 +9,11 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" ) var ErrOutputStopped = errors.New("Output plugin 
stopped") diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 4ff0e40b08..df5b5967f9 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -11,12 +11,12 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/text/encoding" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/logfile/tail" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" ) const ( diff --git a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go index 4d6f513218..7fde5711a3 100644 --- a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go +++ b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go @@ -17,10 +17,10 @@ import ( "syscall" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "golang.org/x/sys/windows" "github.com/aws/amazon-cloudwatch-agent/logs" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" ) // https://msdn.microsoft.com/en-us/library/windows/desktop/aa385588(v=vs.85).aspx diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index aa9d1474b3..a75198f38d 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -14,7 +14,6 @@ import ( "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "go.uber.org/zap" @@ -29,6 +28,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" 
"github.com/aws/amazon-cloudwatch-agent/logs" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/tool/util" ) diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 201c9ffdb9..d00c7a6a69 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -11,11 +11,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/profiler" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" ) const ( diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index 3545b00ef8..2faf17c87d 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -17,11 +17,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/require" "github.com/aws/amazon-cloudwatch-agent/logs" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/tool/util" ) diff --git a/sdk/service/cloudwatchlogs/api.go b/sdk/service/cloudwatchlogs/api.go new file mode 100644 index 0000000000..f9e564e960 --- /dev/null +++ b/sdk/service/cloudwatchlogs/api.go @@ -0,0 +1,21522 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudwatchlogs + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const opAssociateKmsKey = "AssociateKmsKey" + +// AssociateKmsKeyRequest generates a "aws/request.Request" representing the +// client's request for the AssociateKmsKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateKmsKey for more information on using the AssociateKmsKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the AssociateKmsKeyRequest method. 
+// req, resp := client.AssociateKmsKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/AssociateKmsKey +func (c *CloudWatchLogs) AssociateKmsKeyRequest(input *AssociateKmsKeyInput) (req *request.Request, output *AssociateKmsKeyOutput) { + op := &request.Operation{ + Name: opAssociateKmsKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateKmsKeyInput{} + } + + output = &AssociateKmsKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateKmsKey API operation for Amazon CloudWatch Logs. +// +// Associates the specified KMS key with either one log group in the account, +// or with all stored CloudWatch Logs query insights results in the account. +// +// When you use AssociateKmsKey, you specify either the logGroupName parameter +// or the resourceIdentifier parameter. You can't specify both of those parameters +// in the same operation. +// +// - Specify the logGroupName parameter to cause all log events stored in +// the log group to be encrypted with that key. Only the log events ingested +// after the key is associated are encrypted with that key. Associating a +// KMS key with a log group overrides any existing associations between the +// log group and a KMS key. After a KMS key is associated with a log group, +// all newly ingested data for the log group is encrypted using the KMS key. +// This association is stored as long as the data encrypted with the KMS +// key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt +// this data whenever it is requested. Associating a key with a log group +// does not cause the results of queries of that log group to be encrypted +// with that key. 
To have query results encrypted with a KMS key, you must +// use an AssociateKmsKey operation with the resourceIdentifier parameter +// that specifies a query-result resource. +// +// - Specify the resourceIdentifier parameter with a query-result resource, +// to use that key to encrypt the stored results of all future StartQuery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) +// operations in the account. The response from a GetQueryResults (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) +// operation will still return the query results in plain text. Even if you +// have not associated a key with your query results, the query results are +// encrypted when stored, using the default CloudWatch Logs method. If you +// run a query from a monitoring account that queries logs in a source account, +// the query results key from the monitoring account, if any, is used. +// +// If you delete the key that is used to encrypt log events or log group query +// results, then all the associated stored log events or query results that +// were encrypted with that key will be unencryptable and unusable. +// +// CloudWatch Logs supports only symmetric KMS keys. Do not use an associate +// an asymmetric KMS key with your log group or query results. For more information, +// see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). +// +// It can take up to 5 minutes for this operation to take effect. +// +// If you attempt to associate a KMS key with a log group but the KMS key does +// not exist or the KMS key is disabled, you receive an InvalidParameterException +// error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation AssociateKmsKey for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/AssociateKmsKey +func (c *CloudWatchLogs) AssociateKmsKey(input *AssociateKmsKeyInput) (*AssociateKmsKeyOutput, error) { + req, out := c.AssociateKmsKeyRequest(input) + return out, req.Send() +} + +// AssociateKmsKeyWithContext is the same as AssociateKmsKey with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateKmsKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) AssociateKmsKeyWithContext(ctx aws.Context, input *AssociateKmsKeyInput, opts ...request.Option) (*AssociateKmsKeyOutput, error) { + req, out := c.AssociateKmsKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CancelExportTask for more information on using the CancelExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CancelExportTask +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + output = &CancelExportTaskOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelExportTask API operation for Amazon CloudWatch Logs. +// +// Cancels the specified export task. +// +// The task must be in the PENDING or RUNNING state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CancelExportTask for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - InvalidOperationException +// The operation is not valid on the specified resource. 
+// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CancelExportTask +func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + return out, req.Send() +} + +// CancelExportTaskWithContext is the same as CancelExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTaskInput, opts ...request.Option) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDelivery = "CreateDelivery" + +// CreateDeliveryRequest generates a "aws/request.Request" representing the +// client's request for the CreateDelivery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDelivery for more information on using the CreateDelivery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateDeliveryRequest method. 
+// req, resp := client.CreateDeliveryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateDelivery +func (c *CloudWatchLogs) CreateDeliveryRequest(input *CreateDeliveryInput) (req *request.Request, output *CreateDeliveryOutput) { + op := &request.Operation{ + Name: opCreateDelivery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeliveryInput{} + } + + output = &CreateDeliveryOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDelivery API operation for Amazon CloudWatch Logs. +// +// Creates a delivery. A delivery is a connection between a logical delivery +// source and a logical delivery destination that you have already created. +// +// Only some Amazon Web Services services support being configured as a delivery +// source using this operation. These services are listed as Supported [V2 Permissions] +// in the table at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// A delivery destination can represent a log group in CloudWatch Logs, an Amazon +// S3 bucket, or a delivery stream in Firehose. +// +// To configure logs delivery between a supported Amazon Web Services service +// and a destination, you must do the following: +// +// - Create a delivery source, which is a logical object that represents +// the resource that is actually sending the logs. For more information, +// see PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html). +// +// - Create a delivery destination, which is a logical object that represents +// the actual delivery destination. For more information, see PutDeliveryDestination +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html). 
+// +// - If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) +// in the destination account to assign an IAM policy to the destination. +// This policy allows delivery to that destination. +// +// - Use CreateDelivery to create a delivery by pairing exactly one delivery +// source and one delivery destination. +// +// You can configure a single delivery source to send logs to multiple destinations +// by creating multiple deliveries. You can also create multiple deliveries +// to configure multiple delivery sources to send logs to the same delivery +// destination. +// +// You can't update an existing delivery. You can only create and delete deliveries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateDelivery for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - AccessDeniedException +// You don't have sufficient permissions to perform this action. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateDelivery +func (c *CloudWatchLogs) CreateDelivery(input *CreateDeliveryInput) (*CreateDeliveryOutput, error) { + req, out := c.CreateDeliveryRequest(input) + return out, req.Send() +} + +// CreateDeliveryWithContext is the same as CreateDelivery with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDelivery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateDeliveryWithContext(ctx aws.Context, input *CreateDeliveryInput, opts ...request.Option) (*CreateDeliveryOutput, error) { + req, out := c.CreateDeliveryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateExportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateExportTask for more information on using the CreateExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateExportTaskRequest method. 
+// req, resp := client.CreateExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateExportTask +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + output = &CreateExportTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateExportTask API operation for Amazon CloudWatch Logs. +// +// Creates an export task so that you can efficiently export data from a log +// group to an Amazon S3 bucket. When you perform a CreateExportTask operation, +// you must use credentials that have permission to write to the S3 bucket that +// you specify as the destination. +// +// Exporting log data to S3 buckets that are encrypted by KMS is supported. +// Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled +// with a retention period is also supported. +// +// Exporting to S3 buckets that are encrypted with AES-256 is supported. +// +// This is an asynchronous call. If all the required information is provided, +// this operation initiates an export task and responds with the ID of the task. +// After the task has started, you can use DescribeExportTasks (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) +// to get the status of the export task. Each account can only have one active +// (RUNNING or PENDING) export task at a time. To cancel an export task, use +// CancelExportTask (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html). +// +// You can export logs from multiple log groups or multiple time ranges to the +// same S3 bucket. 
To separate log data for each export task, specify a prefix +// to be used as the Amazon S3 key prefix for all exported objects. +// +// Time-based sorting on chunks of log data inside an exported file is not guaranteed. +// You can sort the exported log field data by using Linux utilities. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateExportTask for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ResourceAlreadyExistsException +// The specified resource already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateExportTask +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + return out, req.Send() +} + +// CreateExportTaskWithContext is the same as CreateExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) CreateExportTaskWithContext(ctx aws.Context, input *CreateExportTaskInput, opts ...request.Option) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLogAnomalyDetector = "CreateLogAnomalyDetector" + +// CreateLogAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLogAnomalyDetector for more information on using the CreateLogAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateLogAnomalyDetectorRequest method. +// req, resp := client.CreateLogAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogAnomalyDetector +func (c *CloudWatchLogs) CreateLogAnomalyDetectorRequest(input *CreateLogAnomalyDetectorInput) (req *request.Request, output *CreateLogAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opCreateLogAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogAnomalyDetectorInput{} + } + + output = &CreateLogAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLogAnomalyDetector API operation for Amazon CloudWatch Logs. 
+//
+// Creates an anomaly detector that regularly scans one or more log groups and
+// looks for patterns and anomalies in the logs.
+//
+// An anomaly detector can help surface issues by automatically discovering
+// anomalies in your log event traffic. An anomaly detector uses machine learning
+// algorithms to scan log events and find patterns. A pattern is a shared text
+// structure that recurs among your log fields. Patterns provide a useful tool
+// for analyzing large sets of logs because a large number of log events can
+// often be compressed into a few patterns.
+//
+// The anomaly detector uses pattern recognition to find anomalies, which are
+// unusual log events. It uses the evaluationFrequency to compare current log
+// events and patterns with trained baselines.
+//
+// Fields within a pattern are called tokens. Fields that vary within a pattern,
+// such as a request ID or timestamp, are referred to as dynamic tokens and
+// represented by <*>.
+//
+// The following is an example of a pattern:
+//
+//	[INFO] Request time: <*> ms
+//
+// This pattern represents log events like [INFO] Request time: 327 ms and other
+// similar log events that differ only by the number, in this case 327. When
+// the pattern is displayed, the different numbers are replaced by <*>
+//
+// Any parts of log events that are masked as sensitive data are not scanned
+// for anomalies. For more information about masking sensitive data, see Help
+// protect sensitive log data with masking (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation CreateLogAnomalyDetector for usage and error information.
+// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogAnomalyDetector +func (c *CloudWatchLogs) CreateLogAnomalyDetector(input *CreateLogAnomalyDetectorInput) (*CreateLogAnomalyDetectorOutput, error) { + req, out := c.CreateLogAnomalyDetectorRequest(input) + return out, req.Send() +} + +// CreateLogAnomalyDetectorWithContext is the same as CreateLogAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLogAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateLogAnomalyDetectorWithContext(ctx aws.Context, input *CreateLogAnomalyDetectorInput, opts ...request.Option) (*CreateLogAnomalyDetectorOutput, error) { + req, out := c.CreateLogAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLogGroup for more information on using the CreateLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateLogGroupRequest method. +// req, resp := client.CreateLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogGroup +func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { + op := &request.Operation{ + Name: opCreateLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogGroupInput{} + } + + output = &CreateLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateLogGroup API operation for Amazon CloudWatch Logs. +// +// Creates a log group with the specified name. You can create up to 1,000,000 +// log groups per Region per account. +// +// You must use the following guidelines when naming a log group: +// +// - Log group names must be unique within a Region for an Amazon Web Services +// account. +// +// - Log group names can be between 1 and 512 characters long. +// +// - Log group names consist of the following characters: a-z, A-Z, 0-9, +// '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and +// '#' (number sign) +// +// - Log group names can't start with the string aws/ +// +// When you create a log group, by default the log events in the log group do +// not expire. 
To set a retention policy so that events expire and are deleted +// after a specified time, use PutRetentionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html). +// +// If you associate an KMS key with the log group, ingested data is encrypted +// using the KMS key. This association is stored as long as the data encrypted +// with the KMS key is still within CloudWatch Logs. This enables CloudWatch +// Logs to decrypt this data whenever it is requested. +// +// If you attempt to associate a KMS key with the log group but the KMS key +// does not exist or the KMS key is disabled, you receive an InvalidParameterException +// error. +// +// CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric +// KMS key with your log group. For more information, see Using Symmetric and +// Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateLogGroup for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceAlreadyExistsException +// The specified resource already exists. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogGroup +func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + return out, req.Send() +} + +// CreateLogGroupWithContext is the same as CreateLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateLogGroupWithContext(ctx aws.Context, input *CreateLogGroupInput, opts ...request.Option) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLogStream = "CreateLogStream" + +// CreateLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLogStream for more information on using the CreateLogStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateLogStreamRequest method. 
+// req, resp := client.CreateLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogStream +func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { + op := &request.Operation{ + Name: opCreateLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogStreamInput{} + } + + output = &CreateLogStreamOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateLogStream API operation for Amazon CloudWatch Logs. +// +// Creates a log stream for the specified log group. A log stream is a sequence +// of log events that originate from a single source, such as an application +// instance or a resource that is being monitored. +// +// There is no limit on the number of log streams that you can create for a +// log group. There is a limit of 50 TPS on CreateLogStream operations, after +// which transactions are throttled. +// +// You must use the following guidelines when naming a log stream: +// +// - Log stream names must be unique within the log group. +// +// - Log stream names can be between 1 and 512 characters long. +// +// - Don't use ':' (colon) or '*' (asterisk) characters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateLogStream for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceAlreadyExistsException +// The specified resource already exists. 
+// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogStream +func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + return out, req.Send() +} + +// CreateLogStreamWithContext is the same as CreateLogStream with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLogStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateLogStreamWithContext(ctx aws.Context, input *CreateLogStreamInput, opts ...request.Option) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAccountPolicy = "DeleteAccountPolicy" + +// DeleteAccountPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAccountPolicy for more information on using the DeleteAccountPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DeleteAccountPolicyRequest method. +// req, resp := client.DeleteAccountPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteAccountPolicy +func (c *CloudWatchLogs) DeleteAccountPolicyRequest(input *DeleteAccountPolicyInput) (req *request.Request, output *DeleteAccountPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAccountPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPolicyInput{} + } + + output = &DeleteAccountPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAccountPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes a CloudWatch Logs account policy. This stops the policy from applying +// to all log groups or a subset of log groups in the account. Log-group level +// policies will still be in effect. +// +// To use this operation, you must be signed on with the correct permissions +// depending on the type of policy that you are deleting. +// +// - To delete a data protection policy, you must have the logs:DeleteDataProtectionPolicy +// and logs:DeleteAccountPolicy permissions. +// +// - To delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter +// and logs:DeleteAccountPolicy permissions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteAccountPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. 
+// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteAccountPolicy +func (c *CloudWatchLogs) DeleteAccountPolicy(input *DeleteAccountPolicyInput) (*DeleteAccountPolicyOutput, error) { + req, out := c.DeleteAccountPolicyRequest(input) + return out, req.Send() +} + +// DeleteAccountPolicyWithContext is the same as DeleteAccountPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAccountPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteAccountPolicyWithContext(ctx aws.Context, input *DeleteAccountPolicyInput, opts ...request.Option) (*DeleteAccountPolicyOutput, error) { + req, out := c.DeleteAccountPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDataProtectionPolicy = "DeleteDataProtectionPolicy" + +// DeleteDataProtectionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataProtectionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteDataProtectionPolicy for more information on using the DeleteDataProtectionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDataProtectionPolicyRequest method. +// req, resp := client.DeleteDataProtectionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDataProtectionPolicy +func (c *CloudWatchLogs) DeleteDataProtectionPolicyRequest(input *DeleteDataProtectionPolicyInput) (req *request.Request, output *DeleteDataProtectionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteDataProtectionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDataProtectionPolicyInput{} + } + + output = &DeleteDataProtectionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataProtectionPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes the data protection policy from the specified log group. +// +// For more information about data protection policies, see PutDataProtectionPolicy +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDataProtectionPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. 
+// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDataProtectionPolicy +func (c *CloudWatchLogs) DeleteDataProtectionPolicy(input *DeleteDataProtectionPolicyInput) (*DeleteDataProtectionPolicyOutput, error) { + req, out := c.DeleteDataProtectionPolicyRequest(input) + return out, req.Send() +} + +// DeleteDataProtectionPolicyWithContext is the same as DeleteDataProtectionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataProtectionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDataProtectionPolicyWithContext(ctx aws.Context, input *DeleteDataProtectionPolicyInput, opts ...request.Option) (*DeleteDataProtectionPolicyOutput, error) { + req, out := c.DeleteDataProtectionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDelivery = "DeleteDelivery" + +// DeleteDeliveryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDelivery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See DeleteDelivery for more information on using the DeleteDelivery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the DeleteDeliveryRequest method.
+//	req, resp := client.DeleteDeliveryRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDelivery
+func (c *CloudWatchLogs) DeleteDeliveryRequest(input *DeleteDeliveryInput) (req *request.Request, output *DeleteDeliveryOutput) {
+	op := &request.Operation{
+		Name:       opDeleteDelivery,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteDeliveryInput{}
+	}
+
+	output = &DeleteDeliveryOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteDelivery API operation for Amazon CloudWatch Logs.
+//
+// Deletes a delivery. A delivery is a connection between a logical delivery
+// source and a logical delivery destination. Deleting a delivery only deletes
+// the connection between the delivery source and delivery destination. It does
+// not delete the delivery destination or the delivery source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation DeleteDelivery for usage and error information.
+//
+// Returned Error Types:
+//
+//   - ResourceNotFoundException
+//     The specified resource does not exist.
+//
+//   - ServiceUnavailableException
+//     The service cannot complete the request.
+// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDelivery +func (c *CloudWatchLogs) DeleteDelivery(input *DeleteDeliveryInput) (*DeleteDeliveryOutput, error) { + req, out := c.DeleteDeliveryRequest(input) + return out, req.Send() +} + +// DeleteDeliveryWithContext is the same as DeleteDelivery with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDelivery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDeliveryWithContext(ctx aws.Context, input *DeleteDeliveryInput, opts ...request.Option) (*DeleteDeliveryOutput, error) { + req, out := c.DeleteDeliveryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDeliveryDestination = "DeleteDeliveryDestination" + +// DeleteDeliveryDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeliveryDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteDeliveryDestination for more information on using the DeleteDeliveryDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDeliveryDestinationRequest method. +// req, resp := client.DeleteDeliveryDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliveryDestination +func (c *CloudWatchLogs) DeleteDeliveryDestinationRequest(input *DeleteDeliveryDestinationInput) (req *request.Request, output *DeleteDeliveryDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDeliveryDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliveryDestinationInput{} + } + + output = &DeleteDeliveryDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDeliveryDestination API operation for Amazon CloudWatch Logs. +// +// Deletes a delivery destination. A delivery is a connection between a logical +// delivery source and a logical delivery destination. +// +// You can't delete a delivery destination if any current deliveries are associated +// with it. To find whether any deliveries are associated with this delivery +// destination, use the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) +// operation and check the deliveryDestinationArn field in the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDeliveryDestination for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliveryDestination +func (c *CloudWatchLogs) DeleteDeliveryDestination(input *DeleteDeliveryDestinationInput) (*DeleteDeliveryDestinationOutput, error) { + req, out := c.DeleteDeliveryDestinationRequest(input) + return out, req.Send() +} + +// DeleteDeliveryDestinationWithContext is the same as DeleteDeliveryDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDeliveryDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDeliveryDestinationWithContext(ctx aws.Context, input *DeleteDeliveryDestinationInput, opts ...request.Option) (*DeleteDeliveryDestinationOutput, error) { + req, out := c.DeleteDeliveryDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteDeliveryDestinationPolicy = "DeleteDeliveryDestinationPolicy" + +// DeleteDeliveryDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeliveryDestinationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDeliveryDestinationPolicy for more information on using the DeleteDeliveryDestinationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDeliveryDestinationPolicyRequest method. +// req, resp := client.DeleteDeliveryDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliveryDestinationPolicy +func (c *CloudWatchLogs) DeleteDeliveryDestinationPolicyRequest(input *DeleteDeliveryDestinationPolicyInput) (req *request.Request, output *DeleteDeliveryDestinationPolicyOutput) { + op := &request.Operation{ + Name: opDeleteDeliveryDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliveryDestinationPolicyInput{} + } + + output = &DeleteDeliveryDestinationPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDeliveryDestinationPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes a delivery destination policy. 
For more information about these policies, +// see PutDeliveryDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDeliveryDestinationPolicy for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliveryDestinationPolicy +func (c *CloudWatchLogs) DeleteDeliveryDestinationPolicy(input *DeleteDeliveryDestinationPolicyInput) (*DeleteDeliveryDestinationPolicyOutput, error) { + req, out := c.DeleteDeliveryDestinationPolicyRequest(input) + return out, req.Send() +} + +// DeleteDeliveryDestinationPolicyWithContext is the same as DeleteDeliveryDestinationPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDeliveryDestinationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) DeleteDeliveryDestinationPolicyWithContext(ctx aws.Context, input *DeleteDeliveryDestinationPolicyInput, opts ...request.Option) (*DeleteDeliveryDestinationPolicyOutput, error) { + req, out := c.DeleteDeliveryDestinationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDeliverySource = "DeleteDeliverySource" + +// DeleteDeliverySourceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeliverySource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDeliverySource for more information on using the DeleteDeliverySource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDeliverySourceRequest method. 
+// req, resp := client.DeleteDeliverySourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliverySource +func (c *CloudWatchLogs) DeleteDeliverySourceRequest(input *DeleteDeliverySourceInput) (req *request.Request, output *DeleteDeliverySourceOutput) { + op := &request.Operation{ + Name: opDeleteDeliverySource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliverySourceInput{} + } + + output = &DeleteDeliverySourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDeliverySource API operation for Amazon CloudWatch Logs. +// +// Deletes a delivery source. A delivery is a connection between a logical delivery +// source and a logical delivery destination. +// +// You can't delete a delivery source if any current deliveries are associated +// with it. To find whether any deliveries are associated with this delivery +// source, use the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) +// operation and check the deliverySourceName field in the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDeliverySource for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ConflictException +// This operation attempted to create a resource that already exists. 
+// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDeliverySource +func (c *CloudWatchLogs) DeleteDeliverySource(input *DeleteDeliverySourceInput) (*DeleteDeliverySourceOutput, error) { + req, out := c.DeleteDeliverySourceRequest(input) + return out, req.Send() +} + +// DeleteDeliverySourceWithContext is the same as DeleteDeliverySource with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDeliverySource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDeliverySourceWithContext(ctx aws.Context, input *DeleteDeliverySourceInput, opts ...request.Option) (*DeleteDeliverySourceOutput, error) { + req, out := c.DeleteDeliverySourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDestination for more information on using the DeleteDestination +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDestinationRequest method. +// req, resp := client.DeleteDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDestination +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + output = &DeleteDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDestination API operation for Amazon CloudWatch Logs. +// +// Deletes the specified destination, and eventually disables all the subscription +// filters that publish to it. This operation does not delete the physical resource +// encapsulated by the destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDestination for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDestination +func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + return out, req.Send() +} + +// DeleteDestinationWithContext is the same as DeleteDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDestinationWithContext(ctx aws.Context, input *DeleteDestinationInput, opts ...request.Option) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLogAnomalyDetector = "DeleteLogAnomalyDetector" + +// DeleteLogAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLogAnomalyDetector for more information on using the DeleteLogAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteLogAnomalyDetectorRequest method. 
+// req, resp := client.DeleteLogAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogAnomalyDetector +func (c *CloudWatchLogs) DeleteLogAnomalyDetectorRequest(input *DeleteLogAnomalyDetectorInput) (req *request.Request, output *DeleteLogAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opDeleteLogAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogAnomalyDetectorInput{} + } + + output = &DeleteLogAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLogAnomalyDetector API operation for Amazon CloudWatch Logs. +// +// Deletes the specified CloudWatch Logs anomaly detector. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogAnomalyDetector for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogAnomalyDetector +func (c *CloudWatchLogs) DeleteLogAnomalyDetector(input *DeleteLogAnomalyDetectorInput) (*DeleteLogAnomalyDetectorOutput, error) { + req, out := c.DeleteLogAnomalyDetectorRequest(input) + return out, req.Send() +} + +// DeleteLogAnomalyDetectorWithContext is the same as DeleteLogAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLogAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteLogAnomalyDetectorWithContext(ctx aws.Context, input *DeleteLogAnomalyDetectorInput, opts ...request.Option) (*DeleteLogAnomalyDetectorOutput, error) { + req, out := c.DeleteLogAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLogGroup = "DeleteLogGroup" + +// DeleteLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLogGroup for more information on using the DeleteLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteLogGroupRequest method. 
+// req, resp := client.DeleteLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogGroup +func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { + op := &request.Operation{ + Name: opDeleteLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogGroupInput{} + } + + output = &DeleteLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLogGroup API operation for Amazon CloudWatch Logs. +// +// Deletes the specified log group and permanently deletes all the archived +// log events associated with the log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogGroup for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogGroup +func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + return out, req.Send() +} + +// DeleteLogGroupWithContext is the same as DeleteLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteLogGroupWithContext(ctx aws.Context, input *DeleteLogGroupInput, opts ...request.Option) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLogStream = "DeleteLogStream" + +// DeleteLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLogStream for more information on using the DeleteLogStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteLogStreamRequest method. 
+// req, resp := client.DeleteLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogStream +func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { + op := &request.Operation{ + Name: opDeleteLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogStreamInput{} + } + + output = &DeleteLogStreamOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLogStream API operation for Amazon CloudWatch Logs. +// +// Deletes the specified log stream and permanently deletes all the archived +// log events associated with the log stream. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogStream for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogStream +func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + return out, req.Send() +} + +// DeleteLogStreamWithContext is the same as DeleteLogStream with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLogStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteLogStreamWithContext(ctx aws.Context, input *DeleteLogStreamInput, opts ...request.Option) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteMetricFilter = "DeleteMetricFilter" + +// DeleteMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMetricFilter for more information on using the DeleteMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteMetricFilterRequest method. 
+// req, resp := client.DeleteMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteMetricFilter +func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { + op := &request.Operation{ + Name: opDeleteMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricFilterInput{} + } + + output = &DeleteMetricFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteMetricFilter API operation for Amazon CloudWatch Logs. +// +// Deletes the specified metric filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteMetricFilter for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteMetricFilter +func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + return out, req.Send() +} + +// DeleteMetricFilterWithContext is the same as DeleteMetricFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteMetricFilterWithContext(ctx aws.Context, input *DeleteMetricFilterInput, opts ...request.Option) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteQueryDefinition = "DeleteQueryDefinition" + +// DeleteQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteQueryDefinition for more information on using the DeleteQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteQueryDefinitionRequest method. 
+// req, resp := client.DeleteQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinitionRequest(input *DeleteQueryDefinitionInput) (req *request.Request, output *DeleteQueryDefinitionOutput) { + op := &request.Operation{ + Name: opDeleteQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueryDefinitionInput{} + } + + output = &DeleteQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Deletes a saved CloudWatch Logs Insights query definition. A query definition +// contains details about a saved CloudWatch Logs Insights query. +// +// Each DeleteQueryDefinition operation can delete one query definition. +// +// You must have the logs:DeleteQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteQueryDefinition for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinition(input *DeleteQueryDefinitionInput) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + return out, req.Send() +} + +// DeleteQueryDefinitionWithContext is the same as DeleteQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteQueryDefinitionWithContext(ctx aws.Context, input *DeleteQueryDefinitionInput, opts ...request.Option) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteResourcePolicy = "DeleteResourcePolicy" + +// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteResourcePolicyRequest method. 
+// req, resp := client.DeleteResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteResourcePolicy +func (c *CloudWatchLogs) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) { + op := &request.Operation{ + Name: opDeleteResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourcePolicyInput{} + } + + output = &DeleteResourcePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteResourcePolicy API operation for Amazon CloudWatch Logs. +// +// Deletes a resource policy from this account. This revokes the access of the +// identities in that policy to put log events to this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteResourcePolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteResourcePolicy +func (c *CloudWatchLogs) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + return out, req.Send() +} + +// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRetentionPolicy for more information on using the DeleteRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. 
+// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteRetentionPolicy +func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + output = &DeleteRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes the specified retention policy. +// +// Log events do not expire if they belong to log groups without a retention +// policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteRetentionPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteRetentionPolicy +func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + return out, req.Send() +} + +// DeleteRetentionPolicyWithContext is the same as DeleteRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteRetentionPolicyWithContext(ctx aws.Context, input *DeleteRetentionPolicyInput, opts ...request.Option) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubscriptionFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSubscriptionFilter for more information on using the DeleteSubscriptionFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DeleteSubscriptionFilterRequest method. +// req, resp := client.DeleteSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteSubscriptionFilter +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + output = &DeleteSubscriptionFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Deletes the specified subscription filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteSubscriptionFilter for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteSubscriptionFilter +func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + return out, req.Send() +} + +// DeleteSubscriptionFilterWithContext is the same as DeleteSubscriptionFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSubscriptionFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteSubscriptionFilterWithContext(ctx aws.Context, input *DeleteSubscriptionFilterInput, opts ...request.Option) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAccountPolicies = "DescribeAccountPolicies" + +// DescribeAccountPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAccountPolicies for more information on using the DescribeAccountPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DescribeAccountPoliciesRequest method. +// req, resp := client.DescribeAccountPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeAccountPolicies +func (c *CloudWatchLogs) DescribeAccountPoliciesRequest(input *DescribeAccountPoliciesInput) (req *request.Request, output *DescribeAccountPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeAccountPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountPoliciesInput{} + } + + output = &DescribeAccountPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAccountPolicies API operation for Amazon CloudWatch Logs. +// +// Returns a list of all CloudWatch Logs account policies in the account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeAccountPolicies for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeAccountPolicies +func (c *CloudWatchLogs) DescribeAccountPolicies(input *DescribeAccountPoliciesInput) (*DescribeAccountPoliciesOutput, error) { + req, out := c.DescribeAccountPoliciesRequest(input) + return out, req.Send() +} + +// DescribeAccountPoliciesWithContext is the same as DescribeAccountPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeAccountPoliciesWithContext(ctx aws.Context, input *DescribeAccountPoliciesInput, opts ...request.Option) (*DescribeAccountPoliciesOutput, error) { + req, out := c.DescribeAccountPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDeliveries = "DescribeDeliveries" + +// DescribeDeliveriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeliveries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDeliveries for more information on using the DescribeDeliveries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDeliveriesRequest method. 
+// req, resp := client.DescribeDeliveriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliveries +func (c *CloudWatchLogs) DescribeDeliveriesRequest(input *DescribeDeliveriesInput) (req *request.Request, output *DescribeDeliveriesOutput) { + op := &request.Operation{ + Name: opDescribeDeliveries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDeliveriesInput{} + } + + output = &DescribeDeliveriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDeliveries API operation for Amazon CloudWatch Logs. +// +// Retrieves a list of the deliveries that have been created in the account. +// +// A delivery is a connection between a delivery source (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) +// and a delivery destination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html). +// +// A delivery source represents an Amazon Web Services resource that sends logs +// to an logs delivery destination. The destination can be CloudWatch Logs, +// Amazon S3, or Firehose. Only some Amazon Web Services services support being +// configured as a delivery source. These services are listed in Enable logging +// from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDeliveries for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliveries +func (c *CloudWatchLogs) DescribeDeliveries(input *DescribeDeliveriesInput) (*DescribeDeliveriesOutput, error) { + req, out := c.DescribeDeliveriesRequest(input) + return out, req.Send() +} + +// DescribeDeliveriesWithContext is the same as DescribeDeliveries with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDeliveries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliveriesWithContext(ctx aws.Context, input *DescribeDeliveriesInput, opts ...request.Option) (*DescribeDeliveriesOutput, error) { + req, out := c.DescribeDeliveriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDeliveriesPages iterates over the pages of a DescribeDeliveries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDeliveries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeDeliveries operation. +// pageNum := 0 +// err := client.DescribeDeliveriesPages(params, +// func(page *cloudwatchlogs.DescribeDeliveriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeDeliveriesPages(input *DescribeDeliveriesInput, fn func(*DescribeDeliveriesOutput, bool) bool) error { + return c.DescribeDeliveriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDeliveriesPagesWithContext same as DescribeDeliveriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliveriesPagesWithContext(ctx aws.Context, input *DescribeDeliveriesInput, fn func(*DescribeDeliveriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDeliveriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDeliveriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDeliveriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeDeliveryDestinations = "DescribeDeliveryDestinations" + +// DescribeDeliveryDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeliveryDestinations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDeliveryDestinations for more information on using the DescribeDeliveryDestinations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDeliveryDestinationsRequest method. +// req, resp := client.DescribeDeliveryDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliveryDestinations +func (c *CloudWatchLogs) DescribeDeliveryDestinationsRequest(input *DescribeDeliveryDestinationsInput) (req *request.Request, output *DescribeDeliveryDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDeliveryDestinationsInput{} + } + + output = &DescribeDeliveryDestinationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDeliveryDestinations API operation for Amazon CloudWatch Logs. +// +// Retrieves a list of the delivery destinations that have been created in the +// account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDeliveryDestinations for usage and error information. 
+// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliveryDestinations +func (c *CloudWatchLogs) DescribeDeliveryDestinations(input *DescribeDeliveryDestinationsInput) (*DescribeDeliveryDestinationsOutput, error) { + req, out := c.DescribeDeliveryDestinationsRequest(input) + return out, req.Send() +} + +// DescribeDeliveryDestinationsWithContext is the same as DescribeDeliveryDestinations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDeliveryDestinations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliveryDestinationsWithContext(ctx aws.Context, input *DescribeDeliveryDestinationsInput, opts ...request.Option) (*DescribeDeliveryDestinationsOutput, error) { + req, out := c.DescribeDeliveryDestinationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDeliveryDestinationsPages iterates over the pages of a DescribeDeliveryDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDeliveryDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeDeliveryDestinations operation. +// pageNum := 0 +// err := client.DescribeDeliveryDestinationsPages(params, +// func(page *cloudwatchlogs.DescribeDeliveryDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeDeliveryDestinationsPages(input *DescribeDeliveryDestinationsInput, fn func(*DescribeDeliveryDestinationsOutput, bool) bool) error { + return c.DescribeDeliveryDestinationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDeliveryDestinationsPagesWithContext same as DescribeDeliveryDestinationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliveryDestinationsPagesWithContext(ctx aws.Context, input *DescribeDeliveryDestinationsInput, fn func(*DescribeDeliveryDestinationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDeliveryDestinationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDeliveryDestinationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDeliveryDestinationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeDeliverySources = "DescribeDeliverySources" + +// DescribeDeliverySourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeliverySources operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDeliverySources for more information on using the DescribeDeliverySources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDeliverySourcesRequest method. +// req, resp := client.DescribeDeliverySourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliverySources +func (c *CloudWatchLogs) DescribeDeliverySourcesRequest(input *DescribeDeliverySourcesInput) (req *request.Request, output *DescribeDeliverySourcesOutput) { + op := &request.Operation{ + Name: opDescribeDeliverySources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDeliverySourcesInput{} + } + + output = &DescribeDeliverySourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDeliverySources API operation for Amazon CloudWatch Logs. +// +// Retrieves a list of the delivery sources that have been created in the account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDeliverySources for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDeliverySources +func (c *CloudWatchLogs) DescribeDeliverySources(input *DescribeDeliverySourcesInput) (*DescribeDeliverySourcesOutput, error) { + req, out := c.DescribeDeliverySourcesRequest(input) + return out, req.Send() +} + +// DescribeDeliverySourcesWithContext is the same as DescribeDeliverySources with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDeliverySources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliverySourcesWithContext(ctx aws.Context, input *DescribeDeliverySourcesInput, opts ...request.Option) (*DescribeDeliverySourcesOutput, error) { + req, out := c.DescribeDeliverySourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDeliverySourcesPages iterates over the pages of a DescribeDeliverySources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDeliverySources method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDeliverySources operation. +// pageNum := 0 +// err := client.DescribeDeliverySourcesPages(params, +// func(page *cloudwatchlogs.DescribeDeliverySourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeDeliverySourcesPages(input *DescribeDeliverySourcesInput, fn func(*DescribeDeliverySourcesOutput, bool) bool) error { + return c.DescribeDeliverySourcesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDeliverySourcesPagesWithContext same as DescribeDeliverySourcesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDeliverySourcesPagesWithContext(ctx aws.Context, input *DescribeDeliverySourcesInput, fn func(*DescribeDeliverySourcesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDeliverySourcesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDeliverySourcesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDeliverySourcesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDestinations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDestinations for more information on using the DescribeDestinations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeDestinationsRequest method. +// req, resp := client.DescribeDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDestinations +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + output = &DescribeDestinationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDestinations API operation for Amazon CloudWatch Logs. +// +// Lists all your destinations. The results are ASCII-sorted by destination +// name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDestinations for usage and error information. 
+// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDestinations +func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + return out, req.Send() +} + +// DescribeDestinationsWithContext is the same as DescribeDestinations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDestinations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDestinationsWithContext(ctx aws.Context, input *DescribeDestinationsInput, opts ...request.Option) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDestinationsPages iterates over the pages of a DescribeDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDestinations operation. 
+// pageNum := 0 +// err := client.DescribeDestinationsPages(params, +// func(page *cloudwatchlogs.DescribeDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(*DescribeDestinationsOutput, bool) bool) error { + return c.DescribeDestinationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDestinationsPagesWithContext same as DescribeDestinationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDestinationsPagesWithContext(ctx aws.Context, input *DescribeDestinationsInput, fn func(*DescribeDestinationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDestinationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDestinationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDestinationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportTasks for more information on using the DescribeExportTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeExportTasks +func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + output = &DescribeExportTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportTasks API operation for Amazon CloudWatch Logs. +// +// Lists the specified export tasks. You can list all your export tasks or filter +// the results based on task ID or task status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeExportTasks for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeExportTasks +func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + return out, req.Send() +} + +// DescribeExportTasksWithContext is the same as DescribeExportTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeExportTasksWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.Option) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLogGroups for more information on using the DescribeLogGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeLogGroupsRequest method. 
+// req, resp := client.DescribeLogGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogGroups +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + output = &DescribeLogGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLogGroups API operation for Amazon CloudWatch Logs. +// +// Lists the specified log groups. You can list all your log groups or filter +// the results by prefix. The results are ASCII-sorted by log group name. +// +// CloudWatch Logs doesn’t support IAM policies that control access to the +// DescribeLogGroups action by using the aws:ResourceTag/key-name condition +// key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name +// condition key to control access. For more information about using tags to +// control access, see Controlling access to Amazon Web Services resources using +// tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). +// +// If you are using CloudWatch cross-account observability, you can use this +// operation in a monitoring account and view data from the linked source accounts. +// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogGroups for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogGroups +func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + return out, req.Send() +} + +// DescribeLogGroupsWithContext is the same as DescribeLogGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLogGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogGroupsWithContext(ctx aws.Context, input *DescribeLogGroupsInput, opts ...request.Option) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLogGroupsPages iterates over the pages of a DescribeLogGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogGroups operation. 
+// pageNum := 0 +// err := client.DescribeLogGroupsPages(params, +// func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(*DescribeLogGroupsOutput, bool) bool) error { + return c.DescribeLogGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLogGroupsPagesWithContext same as DescribeLogGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogGroupsPagesWithContext(ctx aws.Context, input *DescribeLogGroupsInput, fn func(*DescribeLogGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLogGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLogGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLogGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogStreams operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeLogStreams for more information on using the DescribeLogStreams +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeLogStreamsRequest method. +// req, resp := client.DescribeLogStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogStreams +func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { + op := &request.Operation{ + Name: opDescribeLogStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogStreamsInput{} + } + + output = &DescribeLogStreamsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLogStreams API operation for Amazon CloudWatch Logs. +// +// Lists the log streams for the specified log group. You can list all the log +// streams or filter the results by prefix. You can also control how the results +// are ordered. +// +// You can specify the log group to search by using either logGroupIdentifier +// or logGroupName. You must include one of these two parameters, but you can't +// include both. +// +// This operation has a limit of five transactions per second, after which transactions +// are throttled. +// +// If you are using CloudWatch cross-account observability, you can use this +// operation in a monitoring account and view data from the linked source accounts. 
+// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogStreams for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogStreams +func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + return out, req.Send() +} + +// DescribeLogStreamsWithContext is the same as DescribeLogStreams with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLogStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogStreamsWithContext(ctx aws.Context, input *DescribeLogStreamsInput, opts ...request.Option) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeLogStreamsPages iterates over the pages of a DescribeLogStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogStreams operation. +// pageNum := 0 +// err := client.DescribeLogStreamsPages(params, +// func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(*DescribeLogStreamsOutput, bool) bool) error { + return c.DescribeLogStreamsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLogStreamsPagesWithContext same as DescribeLogStreamsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogStreamsPagesWithContext(ctx aws.Context, input *DescribeLogStreamsInput, fn func(*DescribeLogStreamsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLogStreamsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLogStreamsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLogStreamsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMetricFilters = "DescribeMetricFilters" + +// DescribeMetricFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMetricFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMetricFilters for more information on using the DescribeMetricFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeMetricFiltersRequest method. +// req, resp := client.DescribeMetricFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeMetricFilters +func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { + op := &request.Operation{ + Name: opDescribeMetricFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMetricFiltersInput{} + } + + output = &DescribeMetricFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMetricFilters API operation for Amazon CloudWatch Logs. +// +// Lists the specified metric filters. 
You can list all of the metric filters +// or filter the results by log name, prefix, metric name, or metric namespace. +// The results are ASCII-sorted by filter name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeMetricFilters for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeMetricFilters +func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + return out, req.Send() +} + +// DescribeMetricFiltersWithContext is the same as DescribeMetricFilters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMetricFilters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeMetricFiltersWithContext(ctx aws.Context, input *DescribeMetricFiltersInput, opts ...request.Option) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeMetricFiltersPages iterates over the pages of a DescribeMetricFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMetricFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMetricFilters operation. +// pageNum := 0 +// err := client.DescribeMetricFiltersPages(params, +// func(page *cloudwatchlogs.DescribeMetricFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(*DescribeMetricFiltersOutput, bool) bool) error { + return c.DescribeMetricFiltersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMetricFiltersPagesWithContext same as DescribeMetricFiltersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeMetricFiltersPagesWithContext(ctx aws.Context, input *DescribeMetricFiltersInput, fn func(*DescribeMetricFiltersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMetricFiltersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMetricFiltersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMetricFiltersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeQueries = "DescribeQueries" + +// DescribeQueriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeQueries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeQueries for more information on using the DescribeQueries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeQueriesRequest method. +// req, resp := client.DescribeQueriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueries +func (c *CloudWatchLogs) DescribeQueriesRequest(input *DescribeQueriesInput) (req *request.Request, output *DescribeQueriesOutput) { + op := &request.Operation{ + Name: opDescribeQueries, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeQueriesInput{} + } + + output = &DescribeQueriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeQueries API operation for Amazon CloudWatch Logs. +// +// Returns a list of CloudWatch Logs Insights queries that are scheduled, running, +// or have been run recently in this account. You can request all queries or +// limit it to queries of a specific log group or queries with a certain status. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeQueries for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueries +func (c *CloudWatchLogs) DescribeQueries(input *DescribeQueriesInput) (*DescribeQueriesOutput, error) { + req, out := c.DescribeQueriesRequest(input) + return out, req.Send() +} + +// DescribeQueriesWithContext is the same as DescribeQueries with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeQueries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeQueriesWithContext(ctx aws.Context, input *DescribeQueriesInput, opts ...request.Option) (*DescribeQueriesOutput, error) { + req, out := c.DescribeQueriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeQueryDefinitions = "DescribeQueryDefinitions" + +// DescribeQueryDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeQueryDefinitions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeQueryDefinitions for more information on using the DescribeQueryDefinitions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeQueryDefinitionsRequest method. +// req, resp := client.DescribeQueryDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitionsRequest(input *DescribeQueryDefinitionsInput) (req *request.Request, output *DescribeQueryDefinitionsOutput) { + op := &request.Operation{ + Name: opDescribeQueryDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeQueryDefinitionsInput{} + } + + output = &DescribeQueryDefinitionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeQueryDefinitions API operation for Amazon CloudWatch Logs. +// +// This operation returns a paginated list of your saved CloudWatch Logs Insights +// query definitions. You can retrieve query definitions from the current account +// or from a source account that is linked to the current account. +// +// You can use the queryDefinitionNamePrefix parameter to limit the results +// to only the query definitions that have names that start with a certain string. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeQueryDefinitions for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitions(input *DescribeQueryDefinitionsInput) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + return out, req.Send() +} + +// DescribeQueryDefinitionsWithContext is the same as DescribeQueryDefinitions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeQueryDefinitions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeQueryDefinitionsWithContext(ctx aws.Context, input *DescribeQueryDefinitionsInput, opts ...request.Option) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeResourcePolicies = "DescribeResourcePolicies" + +// DescribeResourcePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeResourcePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeResourcePolicies for more information on using the DescribeResourcePolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeResourcePoliciesRequest method. +// req, resp := client.DescribeResourcePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeResourcePolicies +func (c *CloudWatchLogs) DescribeResourcePoliciesRequest(input *DescribeResourcePoliciesInput) (req *request.Request, output *DescribeResourcePoliciesOutput) { + op := &request.Operation{ + Name: opDescribeResourcePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResourcePoliciesInput{} + } + + output = &DescribeResourcePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeResourcePolicies API operation for Amazon CloudWatch Logs. +// +// Lists the resource policies in this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeResourcePolicies for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeResourcePolicies +func (c *CloudWatchLogs) DescribeResourcePolicies(input *DescribeResourcePoliciesInput) (*DescribeResourcePoliciesOutput, error) { + req, out := c.DescribeResourcePoliciesRequest(input) + return out, req.Send() +} + +// DescribeResourcePoliciesWithContext is the same as DescribeResourcePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeResourcePolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeResourcePoliciesWithContext(ctx aws.Context, input *DescribeResourcePoliciesInput, opts ...request.Option) (*DescribeResourcePoliciesOutput, error) { + req, out := c.DescribeResourcePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" + +// DescribeSubscriptionFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscriptionFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSubscriptionFilters for more information on using the DescribeSubscriptionFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DescribeSubscriptionFiltersRequest method. +// req, resp := client.DescribeSubscriptionFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeSubscriptionFilters +func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { + op := &request.Operation{ + Name: opDescribeSubscriptionFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscriptionFiltersInput{} + } + + output = &DescribeSubscriptionFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSubscriptionFilters API operation for Amazon CloudWatch Logs. +// +// Lists the subscription filters for the specified log group. You can list +// all the subscription filters or filter the results by prefix. The results +// are ASCII-sorted by filter name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeSubscriptionFilters for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeSubscriptionFilters +func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + return out, req.Send() +} + +// DescribeSubscriptionFiltersWithContext is the same as DescribeSubscriptionFilters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubscriptionFilters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeSubscriptionFiltersWithContext(ctx aws.Context, input *DescribeSubscriptionFiltersInput, opts ...request.Option) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSubscriptionFiltersPages iterates over the pages of a DescribeSubscriptionFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscriptionFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation. 
+// pageNum := 0 +// err := client.DescribeSubscriptionFiltersPages(params, +// func(page *cloudwatchlogs.DescribeSubscriptionFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(*DescribeSubscriptionFiltersOutput, bool) bool) error { + return c.DescribeSubscriptionFiltersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSubscriptionFiltersPagesWithContext same as DescribeSubscriptionFiltersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPagesWithContext(ctx aws.Context, input *DescribeSubscriptionFiltersInput, fn func(*DescribeSubscriptionFiltersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSubscriptionFiltersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubscriptionFiltersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSubscriptionFiltersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisassociateKmsKey = "DisassociateKmsKey" + +// DisassociateKmsKeyRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateKmsKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateKmsKey for more information on using the DisassociateKmsKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisassociateKmsKeyRequest method. +// req, resp := client.DisassociateKmsKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DisassociateKmsKey +func (c *CloudWatchLogs) DisassociateKmsKeyRequest(input *DisassociateKmsKeyInput) (req *request.Request, output *DisassociateKmsKeyOutput) { + op := &request.Operation{ + Name: opDisassociateKmsKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateKmsKeyInput{} + } + + output = &DisassociateKmsKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateKmsKey API operation for Amazon CloudWatch Logs. +// +// Disassociates the specified KMS key from the specified log group or from +// all CloudWatch Logs Insights query results in the account. +// +// When you use DisassociateKmsKey, you specify either the logGroupName parameter +// or the resourceIdentifier parameter. You can't specify both of those parameters +// in the same operation. +// +// - Specify the logGroupName parameter to stop using the KMS key to encrypt +// future log events ingested and stored in the log group. Instead, they +// will be encrypted with the default CloudWatch Logs method. 
The log events +// that were ingested while the key was associated with the log group are +// still encrypted with that key. Therefore, CloudWatch Logs will need permissions +// for the key whenever that data is accessed. +// +// - Specify the resourceIdentifier parameter with the query-result resource +// to stop using the KMS key to encrypt the results of all future StartQuery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) +// operations in the account. They will instead be encrypted with the default +// CloudWatch Logs method. The results from queries that ran while the key +// was associated with the account are still encrypted with that key. Therefore, +// CloudWatch Logs will need permissions for the key whenever that data is +// accessed. +// +// It can take up to 5 minutes for this operation to take effect. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DisassociateKmsKey for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DisassociateKmsKey +func (c *CloudWatchLogs) DisassociateKmsKey(input *DisassociateKmsKeyInput) (*DisassociateKmsKeyOutput, error) { + req, out := c.DisassociateKmsKeyRequest(input) + return out, req.Send() +} + +// DisassociateKmsKeyWithContext is the same as DisassociateKmsKey with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateKmsKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DisassociateKmsKeyWithContext(ctx aws.Context, input *DisassociateKmsKeyInput, opts ...request.Option) (*DisassociateKmsKeyOutput, error) { + req, out := c.DisassociateKmsKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opFilterLogEvents = "FilterLogEvents" + +// FilterLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the FilterLogEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See FilterLogEvents for more information on using the FilterLogEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the FilterLogEventsRequest method. 
+// req, resp := client.FilterLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/FilterLogEvents +func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { + op := &request.Operation{ + Name: opFilterLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &FilterLogEventsInput{} + } + + output = &FilterLogEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// FilterLogEvents API operation for Amazon CloudWatch Logs. +// +// Lists log events from the specified log group. You can list all the log events +// or filter the results using a filter pattern, a time range, and the name +// of the log stream. +// +// You must have the logs:FilterLogEvents permission to perform this operation. +// +// You can specify the log group to search by using either logGroupIdentifier +// or logGroupName. You must include one of these two parameters, but you can't +// include both. +// +// By default, this operation returns as many log events as can fit in 1 MB +// (up to 10,000 log events) or all the events found within the specified time +// range. If the results include a token, that means there are more log events +// available. You can get additional results by specifying the token in a subsequent +// call. This operation can return empty results while there are more log events +// available through the token. +// +// The returned log events are sorted by event timestamp, the timestamp when +// the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents +// request. 
+// +// If you are using CloudWatch cross-account observability, you can use this +// operation in a monitoring account and view data from the linked source accounts. +// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation FilterLogEvents for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/FilterLogEvents +func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { + req, out := c.FilterLogEventsRequest(input) + return out, req.Send() +} + +// FilterLogEventsWithContext is the same as FilterLogEvents with the addition of +// the ability to pass a context and additional request options. +// +// See FilterLogEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) FilterLogEventsWithContext(ctx aws.Context, input *FilterLogEventsInput, opts ...request.Option) (*FilterLogEventsOutput, error) { + req, out := c.FilterLogEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// FilterLogEventsPages iterates over the pages of a FilterLogEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See FilterLogEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a FilterLogEvents operation. +// pageNum := 0 +// err := client.FilterLogEventsPages(params, +// func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(*FilterLogEventsOutput, bool) bool) error { + return c.FilterLogEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// FilterLogEventsPagesWithContext same as FilterLogEventsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) FilterLogEventsPagesWithContext(ctx aws.Context, input *FilterLogEventsInput, fn func(*FilterLogEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *FilterLogEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.FilterLogEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*FilterLogEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetDataProtectionPolicy = "GetDataProtectionPolicy" + +// GetDataProtectionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetDataProtectionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDataProtectionPolicy for more information on using the GetDataProtectionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetDataProtectionPolicyRequest method. +// req, resp := client.GetDataProtectionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDataProtectionPolicy +func (c *CloudWatchLogs) GetDataProtectionPolicyRequest(input *GetDataProtectionPolicyInput) (req *request.Request, output *GetDataProtectionPolicyOutput) { + op := &request.Operation{ + Name: opGetDataProtectionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataProtectionPolicyInput{} + } + + output = &GetDataProtectionPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDataProtectionPolicy API operation for Amazon CloudWatch Logs. +// +// Returns information about a log group data protection policy. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetDataProtectionPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDataProtectionPolicy +func (c *CloudWatchLogs) GetDataProtectionPolicy(input *GetDataProtectionPolicyInput) (*GetDataProtectionPolicyOutput, error) { + req, out := c.GetDataProtectionPolicyRequest(input) + return out, req.Send() +} + +// GetDataProtectionPolicyWithContext is the same as GetDataProtectionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetDataProtectionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetDataProtectionPolicyWithContext(ctx aws.Context, input *GetDataProtectionPolicyInput, opts ...request.Option) (*GetDataProtectionPolicyOutput, error) { + req, out := c.GetDataProtectionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDelivery = "GetDelivery" + +// GetDeliveryRequest generates a "aws/request.Request" representing the +// client's request for the GetDelivery operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDelivery for more information on using the GetDelivery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetDeliveryRequest method. +// req, resp := client.GetDeliveryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDelivery +func (c *CloudWatchLogs) GetDeliveryRequest(input *GetDeliveryInput) (req *request.Request, output *GetDeliveryOutput) { + op := &request.Operation{ + Name: opGetDelivery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeliveryInput{} + } + + output = &GetDeliveryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDelivery API operation for Amazon CloudWatch Logs. +// +// Returns complete information about one logical delivery. A delivery is a +// connection between a delivery source (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) +// and a delivery destination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html). +// +// A delivery source represents an Amazon Web Services resource that sends logs +// to an logs delivery destination. The destination can be CloudWatch Logs, +// Amazon S3, or Firehose. Only some Amazon Web Services services support being +// configured as a delivery source. These services are listed in Enable logging +// from Amazon Web Services services. 
(https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// You need to specify the delivery id in this operation. You can find the IDs +// of the deliveries in your account with the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetDelivery for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDelivery +func (c *CloudWatchLogs) GetDelivery(input *GetDeliveryInput) (*GetDeliveryOutput, error) { + req, out := c.GetDeliveryRequest(input) + return out, req.Send() +} + +// GetDeliveryWithContext is the same as GetDelivery with the addition of +// the ability to pass a context and additional request options. +// +// See GetDelivery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+// NOTE(review): auto-generated AWS SDK for Go code (service/cloudwatchlogs) carried
+// in a vendored-update patch hunk. Do not hand-edit these wrappers — regenerate from
+// the upstream SDK instead; presumably this hunk must stay byte-identical to
+// aws-sdk-go's generated api.go for future syncs to apply cleanly (TODO confirm).
+func (c *CloudWatchLogs) GetDeliveryWithContext(ctx aws.Context, input *GetDeliveryInput, opts ...request.Option) (*GetDeliveryOutput, error) {
+ req, out := c.GetDeliveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDeliveryDestination = "GetDeliveryDestination"
+
+// GetDeliveryDestinationRequest generates a "aws/request.Request" representing the
+// client's request for the GetDeliveryDestination operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDeliveryDestination for more information on using the GetDeliveryDestination
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetDeliveryDestinationRequest method.
+// req, resp := client.GetDeliveryDestinationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliveryDestination
+func (c *CloudWatchLogs) GetDeliveryDestinationRequest(input *GetDeliveryDestinationInput) (req *request.Request, output *GetDeliveryDestinationOutput) {
+ op := &request.Operation{
+ Name: opGetDeliveryDestination,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDeliveryDestinationInput{}
+ }
+
+ output = &GetDeliveryDestinationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDeliveryDestination API operation for Amazon CloudWatch Logs.
+//
+// Retrieves complete information about one delivery destination.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetDeliveryDestination for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// - ValidationException
+// One of the parameters for the request is not valid.
+//
+// - ServiceQuotaExceededException
+// This request exceeds a service quota.
+//
+// - ThrottlingException
+// The request was throttled because of quota limits.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliveryDestination
+func (c *CloudWatchLogs) GetDeliveryDestination(input *GetDeliveryDestinationInput) (*GetDeliveryDestinationOutput, error) {
+ req, out := c.GetDeliveryDestinationRequest(input)
+ return out, req.Send()
+}
+
+// GetDeliveryDestinationWithContext is the same as GetDeliveryDestination with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDeliveryDestination for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetDeliveryDestinationWithContext(ctx aws.Context, input *GetDeliveryDestinationInput, opts ...request.Option) (*GetDeliveryDestinationOutput, error) {
+ req, out := c.GetDeliveryDestinationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDeliveryDestinationPolicy = "GetDeliveryDestinationPolicy"
+
+// GetDeliveryDestinationPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetDeliveryDestinationPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDeliveryDestinationPolicy for more information on using the GetDeliveryDestinationPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetDeliveryDestinationPolicyRequest method.
+// req, resp := client.GetDeliveryDestinationPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliveryDestinationPolicy
+func (c *CloudWatchLogs) GetDeliveryDestinationPolicyRequest(input *GetDeliveryDestinationPolicyInput) (req *request.Request, output *GetDeliveryDestinationPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetDeliveryDestinationPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDeliveryDestinationPolicyInput{}
+ }
+
+ output = &GetDeliveryDestinationPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDeliveryDestinationPolicy API operation for Amazon CloudWatch Logs.
+//
+// Retrieves the delivery destination policy assigned to the delivery destination
+// that you specify. For more information about delivery destinations and their
+// policies, see PutDeliveryDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetDeliveryDestinationPolicy for usage and error information.
+//
+// Returned Error Types:
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// - ValidationException
+// One of the parameters for the request is not valid.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliveryDestinationPolicy
+func (c *CloudWatchLogs) GetDeliveryDestinationPolicy(input *GetDeliveryDestinationPolicyInput) (*GetDeliveryDestinationPolicyOutput, error) {
+ req, out := c.GetDeliveryDestinationPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetDeliveryDestinationPolicyWithContext is the same as GetDeliveryDestinationPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDeliveryDestinationPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetDeliveryDestinationPolicyWithContext(ctx aws.Context, input *GetDeliveryDestinationPolicyInput, opts ...request.Option) (*GetDeliveryDestinationPolicyOutput, error) {
+ req, out := c.GetDeliveryDestinationPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetDeliverySource = "GetDeliverySource"
+
+// GetDeliverySourceRequest generates a "aws/request.Request" representing the
+// client's request for the GetDeliverySource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDeliverySource for more information on using the GetDeliverySource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetDeliverySourceRequest method.
+// req, resp := client.GetDeliverySourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliverySource
+func (c *CloudWatchLogs) GetDeliverySourceRequest(input *GetDeliverySourceInput) (req *request.Request, output *GetDeliverySourceOutput) {
+ op := &request.Operation{
+ Name: opGetDeliverySource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDeliverySourceInput{}
+ }
+
+ output = &GetDeliverySourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDeliverySource API operation for Amazon CloudWatch Logs.
+//
+// Retrieves complete information about one delivery source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetDeliverySource for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// - ValidationException
+// One of the parameters for the request is not valid.
+//
+// - ServiceQuotaExceededException
+// This request exceeds a service quota.
+//
+// - ThrottlingException
+// The request was throttled because of quota limits.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetDeliverySource
+func (c *CloudWatchLogs) GetDeliverySource(input *GetDeliverySourceInput) (*GetDeliverySourceOutput, error) {
+ req, out := c.GetDeliverySourceRequest(input)
+ return out, req.Send()
+}
+
+// GetDeliverySourceWithContext is the same as GetDeliverySource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDeliverySource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetDeliverySourceWithContext(ctx aws.Context, input *GetDeliverySourceInput, opts ...request.Option) (*GetDeliverySourceOutput, error) {
+ req, out := c.GetDeliverySourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLogAnomalyDetector = "GetLogAnomalyDetector"
+
+// GetLogAnomalyDetectorRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogAnomalyDetector operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLogAnomalyDetector for more information on using the GetLogAnomalyDetector
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetLogAnomalyDetectorRequest method.
+// req, resp := client.GetLogAnomalyDetectorRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogAnomalyDetector
+func (c *CloudWatchLogs) GetLogAnomalyDetectorRequest(input *GetLogAnomalyDetectorInput) (req *request.Request, output *GetLogAnomalyDetectorOutput) {
+ op := &request.Operation{
+ Name: opGetLogAnomalyDetector,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLogAnomalyDetectorInput{}
+ }
+
+ output = &GetLogAnomalyDetectorOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLogAnomalyDetector API operation for Amazon CloudWatch Logs.
+//
+// Retrieves information about the log anomaly detector that you specify.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetLogAnomalyDetector for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// - OperationAbortedException
+// Multiple concurrent requests to update the same resource were in conflict.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogAnomalyDetector
+func (c *CloudWatchLogs) GetLogAnomalyDetector(input *GetLogAnomalyDetectorInput) (*GetLogAnomalyDetectorOutput, error) {
+ req, out := c.GetLogAnomalyDetectorRequest(input)
+ return out, req.Send()
+}
+
+// GetLogAnomalyDetectorWithContext is the same as GetLogAnomalyDetector with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLogAnomalyDetector for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetLogAnomalyDetectorWithContext(ctx aws.Context, input *GetLogAnomalyDetectorInput, opts ...request.Option) (*GetLogAnomalyDetectorOutput, error) {
+ req, out := c.GetLogAnomalyDetectorRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLogEvents = "GetLogEvents"
+
+// GetLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogEvents operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLogEvents for more information on using the GetLogEvents
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetLogEventsRequest method.
+// req, resp := client.GetLogEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogEvents
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) {
+ op := &request.Operation{
+ Name: opGetLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextForwardToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetLogEventsInput{}
+ }
+
+ output = &GetLogEventsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLogEvents API operation for Amazon CloudWatch Logs.
+//
+// Lists log events from the specified log stream. You can list all of the log
+// events or filter using a time range.
+//
+// By default, this operation returns as many log events as can fit in a response
+// size of 1MB (up to 10,000 log events). You can get additional log events
+// by specifying one of the tokens in a subsequent call. This operation can
+// return empty results while there are more log events available through the
+// token.
+//
+// If you are using CloudWatch cross-account observability, you can use this
+// operation in a monitoring account and view data from the linked source accounts.
+// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
+//
+// You can specify the log group to search by using either logGroupIdentifier
+// or logGroupName. You must include one of these two parameters, but you can't
+// include both.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetLogEvents for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogEvents
+func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) {
+ req, out := c.GetLogEventsRequest(input)
+ return out, req.Send()
+}
+
+// GetLogEventsWithContext is the same as GetLogEvents with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLogEvents for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetLogEventsWithContext(ctx aws.Context, input *GetLogEventsInput, opts ...request.Option) (*GetLogEventsOutput, error) {
+ req, out := c.GetLogEventsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// GetLogEventsPages iterates over the pages of a GetLogEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetLogEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a GetLogEvents operation.
+// pageNum := 0
+// err := client.GetLogEventsPages(params,
+// func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(*GetLogEventsOutput, bool) bool) error {
+ return c.GetLogEventsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// GetLogEventsPagesWithContext same as GetLogEventsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetLogEventsPagesWithContext(ctx aws.Context, input *GetLogEventsInput, fn func(*GetLogEventsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ EndPageOnSameToken: true,
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *GetLogEventsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.GetLogEventsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*GetLogEventsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opGetLogGroupFields = "GetLogGroupFields"
+
+// GetLogGroupFieldsRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogGroupFields operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLogGroupFields for more information on using the GetLogGroupFields
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetLogGroupFieldsRequest method.
+// req, resp := client.GetLogGroupFieldsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogGroupFields
+func (c *CloudWatchLogs) GetLogGroupFieldsRequest(input *GetLogGroupFieldsInput) (req *request.Request, output *GetLogGroupFieldsOutput) {
+ op := &request.Operation{
+ Name: opGetLogGroupFields,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLogGroupFieldsInput{}
+ }
+
+ output = &GetLogGroupFieldsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLogGroupFields API operation for Amazon CloudWatch Logs.
+//
+// Returns a list of the fields that are included in log events in the specified
+// log group. Includes the percentage of log events that contain each field.
+// The search is limited to a time period that you specify.
+//
+// You can specify the log group to search by using either logGroupIdentifier
+// or logGroupName. You must specify one of these parameters, but you can't
+// specify both.
+//
+// In the results, fields that start with @ are fields generated by CloudWatch
+// Logs. For example, @timestamp is the timestamp of each log event. For more
+// information about the fields that are generated by CloudWatch logs, see Supported
+// Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html).
+//
+// The response results are sorted by the frequency percentage, starting with
+// the highest percentage.
+//
+// If you are using CloudWatch cross-account observability, you can use this
+// operation in a monitoring account and view data from the linked source accounts.
+// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetLogGroupFields for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - LimitExceededException
+// You have reached the maximum number of resources that can be created.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogGroupFields
+func (c *CloudWatchLogs) GetLogGroupFields(input *GetLogGroupFieldsInput) (*GetLogGroupFieldsOutput, error) {
+ req, out := c.GetLogGroupFieldsRequest(input)
+ return out, req.Send()
+}
+
+// GetLogGroupFieldsWithContext is the same as GetLogGroupFields with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLogGroupFields for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetLogGroupFieldsWithContext(ctx aws.Context, input *GetLogGroupFieldsInput, opts ...request.Option) (*GetLogGroupFieldsOutput, error) {
+ req, out := c.GetLogGroupFieldsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLogRecord = "GetLogRecord"
+
+// GetLogRecordRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogRecord operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLogRecord for more information on using the GetLogRecord
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetLogRecordRequest method.
+// req, resp := client.GetLogRecordRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogRecord
+func (c *CloudWatchLogs) GetLogRecordRequest(input *GetLogRecordInput) (req *request.Request, output *GetLogRecordOutput) {
+ op := &request.Operation{
+ Name: opGetLogRecord,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLogRecordInput{}
+ }
+
+ output = &GetLogRecordOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLogRecord API operation for Amazon CloudWatch Logs.
+//
+// Retrieves all of the fields and values of a single log event. All fields
+// are retrieved, even if the original query that produced the logRecordPointer
+// retrieved only a subset of fields. Fields are returned as field name/field
+// value pairs.
+//
+// The full unparsed log event is returned within @message.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetLogRecord for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - LimitExceededException
+// You have reached the maximum number of resources that can be created.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogRecord
+func (c *CloudWatchLogs) GetLogRecord(input *GetLogRecordInput) (*GetLogRecordOutput, error) {
+ req, out := c.GetLogRecordRequest(input)
+ return out, req.Send()
+}
+
+// GetLogRecordWithContext is the same as GetLogRecord with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLogRecord for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetLogRecordWithContext(ctx aws.Context, input *GetLogRecordInput, opts ...request.Option) (*GetLogRecordOutput, error) {
+ req, out := c.GetLogRecordRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetQueryResults = "GetQueryResults"
+
+// GetQueryResultsRequest generates a "aws/request.Request" representing the
+// client's request for the GetQueryResults operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetQueryResults for more information on using the GetQueryResults
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetQueryResultsRequest method.
+// req, resp := client.GetQueryResultsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetQueryResults
+func (c *CloudWatchLogs) GetQueryResultsRequest(input *GetQueryResultsInput) (req *request.Request, output *GetQueryResultsOutput) {
+ op := &request.Operation{
+ Name: opGetQueryResults,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetQueryResultsInput{}
+ }
+
+ output = &GetQueryResultsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetQueryResults API operation for Amazon CloudWatch Logs.
+//
+// Returns the results from the specified query.
+//
+// Only the fields requested in the query are returned, along with a @ptr field,
+// which is the identifier for the log record. You can use the value of @ptr
+// in a GetLogRecord (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html)
+// operation to get the full log record.
+//
+// GetQueryResults does not start running a query. To run a query, use StartQuery
+// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html).
+// For more information about how long results of previous queries are available,
+// see CloudWatch Logs quotas (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html).
+//
+// If the value of the Status field in the output is Running, this operation
+// returns only partial results. If you see a value of Scheduled or Running
+// for the status, you can retry the operation later to see the final results.
+//
+// If you are using CloudWatch cross-account observability, you can use this
+// operation in a monitoring account to start queries in linked source accounts.
+// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetQueryResults for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetQueryResults
+func (c *CloudWatchLogs) GetQueryResults(input *GetQueryResultsInput) (*GetQueryResultsOutput, error) {
+ req, out := c.GetQueryResultsRequest(input)
+ return out, req.Send()
+}
+
+// GetQueryResultsWithContext is the same as GetQueryResults with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetQueryResults for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResultsInput, opts ...request.Option) (*GetQueryResultsOutput, error) {
+ req, out := c.GetQueryResultsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListAnomalies = "ListAnomalies"
+
+// ListAnomaliesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAnomalies operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListAnomalies for more information on using the ListAnomalies
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ListAnomaliesRequest method.
+// req, resp := client.ListAnomaliesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListAnomalies
+func (c *CloudWatchLogs) ListAnomaliesRequest(input *ListAnomaliesInput) (req *request.Request, output *ListAnomaliesOutput) {
+ op := &request.Operation{
+ Name: opListAnomalies,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListAnomaliesInput{}
+ }
+
+ output = &ListAnomaliesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListAnomalies API operation for Amazon CloudWatch Logs.
+//
+// Returns a list of anomalies that log anomaly detectors have found. For details
+// about the structure format of each anomaly object that is returned, see the
+// example in this section.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation ListAnomalies for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+//
+// - OperationAbortedException
+// Multiple concurrent requests to update the same resource were in conflict.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListAnomalies
+func (c *CloudWatchLogs) ListAnomalies(input *ListAnomaliesInput) (*ListAnomaliesOutput, error) {
+ req, out := c.ListAnomaliesRequest(input)
+ return out, req.Send()
+}
+
+// ListAnomaliesWithContext is the same as ListAnomalies with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAnomalies for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) ListAnomaliesWithContext(ctx aws.Context, input *ListAnomaliesInput, opts ...request.Option) (*ListAnomaliesOutput, error) {
+ req, out := c.ListAnomaliesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListAnomaliesPages iterates over the pages of a ListAnomalies operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListAnomalies method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListAnomalies operation.
+// pageNum := 0
+// err := client.ListAnomaliesPages(params,
+// func(page *cloudwatchlogs.ListAnomaliesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *CloudWatchLogs) ListAnomaliesPages(input *ListAnomaliesInput, fn func(*ListAnomaliesOutput, bool) bool) error {
+ return c.ListAnomaliesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListAnomaliesPagesWithContext same as ListAnomaliesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchLogs) ListAnomaliesPagesWithContext(ctx aws.Context, input *ListAnomaliesInput, fn func(*ListAnomaliesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ EndPageOnSameToken: true,
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListAnomaliesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListAnomaliesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListAnomaliesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListLogAnomalyDetectors = "ListLogAnomalyDetectors"
+
+// ListLogAnomalyDetectorsRequest generates a "aws/request.Request" representing the
+// client's request for the ListLogAnomalyDetectors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListLogAnomalyDetectors for more information on using the ListLogAnomalyDetectors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ListLogAnomalyDetectorsRequest method.
+// req, resp := client.ListLogAnomalyDetectorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListLogAnomalyDetectors
+func (c *CloudWatchLogs) ListLogAnomalyDetectorsRequest(input *ListLogAnomalyDetectorsInput) (req *request.Request, output *ListLogAnomalyDetectorsOutput) {
+ op := &request.Operation{
+ Name: opListLogAnomalyDetectors,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListLogAnomalyDetectorsInput{}
+ }
+
+ output = &ListLogAnomalyDetectorsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListLogAnomalyDetectors API operation for Amazon CloudWatch Logs.
+//
+// Retrieves a list of the log anomaly detectors in the account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation ListLogAnomalyDetectors for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request.
+// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListLogAnomalyDetectors +func (c *CloudWatchLogs) ListLogAnomalyDetectors(input *ListLogAnomalyDetectorsInput) (*ListLogAnomalyDetectorsOutput, error) { + req, out := c.ListLogAnomalyDetectorsRequest(input) + return out, req.Send() +} + +// ListLogAnomalyDetectorsWithContext is the same as ListLogAnomalyDetectors with the addition of +// the ability to pass a context and additional request options. +// +// See ListLogAnomalyDetectors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) ListLogAnomalyDetectorsWithContext(ctx aws.Context, input *ListLogAnomalyDetectorsInput, opts ...request.Option) (*ListLogAnomalyDetectorsOutput, error) { + req, out := c.ListLogAnomalyDetectorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListLogAnomalyDetectorsPages iterates over the pages of a ListLogAnomalyDetectors operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListLogAnomalyDetectors method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListLogAnomalyDetectors operation. 
+// pageNum := 0 +// err := client.ListLogAnomalyDetectorsPages(params, +// func(page *cloudwatchlogs.ListLogAnomalyDetectorsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatchLogs) ListLogAnomalyDetectorsPages(input *ListLogAnomalyDetectorsInput, fn func(*ListLogAnomalyDetectorsOutput, bool) bool) error { + return c.ListLogAnomalyDetectorsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListLogAnomalyDetectorsPagesWithContext same as ListLogAnomalyDetectorsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) ListLogAnomalyDetectorsPagesWithContext(ctx aws.Context, input *ListLogAnomalyDetectorsInput, fn func(*ListLogAnomalyDetectorsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *ListLogAnomalyDetectorsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListLogAnomalyDetectorsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListLogAnomalyDetectorsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsForResource +func (c *CloudWatchLogs) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon CloudWatch Logs. +// +// Displays the tags associated with a CloudWatch Logs resource. Currently, +// log groups and destinations support tagging. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsForResource +func (c *CloudWatchLogs) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsLogGroup = "ListTagsLogGroup" + +// ListTagsLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsLogGroup for more information on using the ListTagsLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsLogGroupRequest method. 
+// req, resp := client.ListTagsLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsLogGroup +// +// Deprecated: Please use the generic tagging API ListTagsForResource +func (c *CloudWatchLogs) ListTagsLogGroupRequest(input *ListTagsLogGroupInput) (req *request.Request, output *ListTagsLogGroupOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListTagsLogGroup, has been deprecated") + } + op := &request.Operation{ + Name: opListTagsLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsLogGroupInput{} + } + + output = &ListTagsLogGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsLogGroup API operation for Amazon CloudWatch Logs. +// +// The ListTagsLogGroup operation is on the path to deprecation. We recommend +// that you use ListTagsForResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html) +// instead. +// +// Lists the tags for the specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation ListTagsLogGroup for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsLogGroup +// +// Deprecated: Please use the generic tagging API ListTagsForResource +func (c *CloudWatchLogs) ListTagsLogGroup(input *ListTagsLogGroupInput) (*ListTagsLogGroupOutput, error) { + req, out := c.ListTagsLogGroupRequest(input) + return out, req.Send() +} + +// ListTagsLogGroupWithContext is the same as ListTagsLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: Please use the generic tagging API ListTagsForResource +func (c *CloudWatchLogs) ListTagsLogGroupWithContext(ctx aws.Context, input *ListTagsLogGroupInput, opts ...request.Option) (*ListTagsLogGroupOutput, error) { + req, out := c.ListTagsLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutAccountPolicy = "PutAccountPolicy" + +// PutAccountPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutAccountPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAccountPolicy for more information on using the PutAccountPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the PutAccountPolicyRequest method. +// req, resp := client.PutAccountPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutAccountPolicy +func (c *CloudWatchLogs) PutAccountPolicyRequest(input *PutAccountPolicyInput) (req *request.Request, output *PutAccountPolicyOutput) { + op := &request.Operation{ + Name: opPutAccountPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAccountPolicyInput{} + } + + output = &PutAccountPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutAccountPolicy API operation for Amazon CloudWatch Logs. +// +// Creates an account-level data protection policy or subscription filter policy +// that applies to all log groups or a subset of log groups in the account. +// +// # Data protection policy +// +// A data protection policy can help safeguard sensitive data that's ingested +// by your log groups by auditing and masking the sensitive log data. Each account +// can have only one account-level data protection policy. +// +// Sensitive data is detected and masked when it is ingested into a log group. +// When you set a data protection policy, log events ingested into the log groups +// before that time are not masked. +// +// If you use PutAccountPolicy to create a data protection policy for your whole +// account, it applies to both existing log groups and all log groups that are +// created later in this account. The account-level policy is applied to existing +// log groups with eventual consistency. It might take up to 5 minutes before +// sensitive data in existing log groups begins to be masked. +// +// By default, when a user views a log event that includes masked data, the +// sensitive data is replaced by asterisks. 
A user who has the logs:Unmask permission +// can use a GetLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) +// or FilterLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) +// operation with the unmask parameter set to true to view the unmasked log +// events. Users with the logs:Unmask can also view unmasked data in the CloudWatch +// Logs console by running a CloudWatch Logs Insights query with the unmask +// query command. +// +// For more information, including a list of types of data that can be audited +// and masked, see Protect sensitive log data with masking (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html). +// +// To use the PutAccountPolicy operation for a data protection policy, you must +// be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy +// permissions. +// +// The PutAccountPolicy operation applies to all log groups in the account. +// You can use PutDataProtectionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html) +// to create a data protection policy that applies to just one log group. If +// a log group has its own data protection policy and the account also has an +// account-level data protection policy, then the two policies are cumulative. +// Any sensitive term specified in either policy is masked. +// +// # Subscription filter policy +// +// A subscription filter policy sets up a real-time feed of log events from +// CloudWatch Logs to other Amazon Web Services services. Account-level subscription +// filter policies apply to both existing log groups and log groups that are +// created later in this account. Supported destinations are Kinesis Data Streams, +// Firehose, and Lambda. When log events are sent to the receiving service, +// they are Base64 encoded and compressed with the GZIP format. 
+// +// The following destinations are supported for subscription filters: +// +// - An Kinesis Data Streams data stream in the same account as the subscription +// policy, for same-account delivery. +// +// - An Firehose data stream in the same account as the subscription policy, +// for same-account delivery. +// +// - A Lambda function in the same account as the subscription policy, for +// same-account delivery. +// +// - A logical destination in a different account created with PutDestination +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), +// for cross-account delivery. Kinesis Data Streams and Firehose are supported +// as logical destinations. +// +// Each account can have one account-level subscription filter policy per Region. +// If you are updating an existing filter, you must specify the correct name +// in PolicyName. To perform a PutAccountPolicy subscription filter operation +// for any destination except a Lambda function, you must also have the iam:PassRole +// permission. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutAccountPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutAccountPolicy +func (c *CloudWatchLogs) PutAccountPolicy(input *PutAccountPolicyInput) (*PutAccountPolicyOutput, error) { + req, out := c.PutAccountPolicyRequest(input) + return out, req.Send() +} + +// PutAccountPolicyWithContext is the same as PutAccountPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutAccountPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutAccountPolicyWithContext(ctx aws.Context, input *PutAccountPolicyInput, opts ...request.Option) (*PutAccountPolicyOutput, error) { + req, out := c.PutAccountPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDataProtectionPolicy = "PutDataProtectionPolicy" + +// PutDataProtectionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDataProtectionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDataProtectionPolicy for more information on using the PutDataProtectionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDataProtectionPolicyRequest method. 
+// req, resp := client.PutDataProtectionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDataProtectionPolicy +func (c *CloudWatchLogs) PutDataProtectionPolicyRequest(input *PutDataProtectionPolicyInput) (req *request.Request, output *PutDataProtectionPolicyOutput) { + op := &request.Operation{ + Name: opPutDataProtectionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDataProtectionPolicyInput{} + } + + output = &PutDataProtectionPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDataProtectionPolicy API operation for Amazon CloudWatch Logs. +// +// Creates a data protection policy for the specified log group. A data protection +// policy can help safeguard sensitive data that's ingested by the log group +// by auditing and masking the sensitive log data. +// +// Sensitive data is detected and masked when it is ingested into the log group. +// When you set a data protection policy, log events ingested into the log group +// before that time are not masked. +// +// By default, when a user views a log event that includes masked data, the +// sensitive data is replaced by asterisks. A user who has the logs:Unmask permission +// can use a GetLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) +// or FilterLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) +// operation with the unmask parameter set to true to view the unmasked log +// events. Users with the logs:Unmask can also view unmasked data in the CloudWatch +// Logs console by running a CloudWatch Logs Insights query with the unmask +// query command. 
+// +// For more information, including a list of types of data that can be audited +// and masked, see Protect sensitive log data with masking (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html). +// +// The PutDataProtectionPolicy operation applies to only the specified log group. +// You can also use PutAccountPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutAccountPolicy.html) +// to create an account-level data protection policy that applies to all log +// groups in the account, including both existing log groups and log groups +// that are created level. If a log group has its own data protection policy +// and the account also has an account-level data protection policy, then the +// two policies are cumulative. Any sensitive term specified in either policy +// is masked. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDataProtectionPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDataProtectionPolicy +func (c *CloudWatchLogs) PutDataProtectionPolicy(input *PutDataProtectionPolicyInput) (*PutDataProtectionPolicyOutput, error) { + req, out := c.PutDataProtectionPolicyRequest(input) + return out, req.Send() +} + +// PutDataProtectionPolicyWithContext is the same as PutDataProtectionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutDataProtectionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDataProtectionPolicyWithContext(ctx aws.Context, input *PutDataProtectionPolicyInput, opts ...request.Option) (*PutDataProtectionPolicyOutput, error) { + req, out := c.PutDataProtectionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDeliveryDestination = "PutDeliveryDestination" + +// PutDeliveryDestinationRequest generates a "aws/request.Request" representing the +// client's request for the PutDeliveryDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDeliveryDestination for more information on using the PutDeliveryDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the PutDeliveryDestinationRequest method. +// req, resp := client.PutDeliveryDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliveryDestination +func (c *CloudWatchLogs) PutDeliveryDestinationRequest(input *PutDeliveryDestinationInput) (req *request.Request, output *PutDeliveryDestinationOutput) { + op := &request.Operation{ + Name: opPutDeliveryDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDeliveryDestinationInput{} + } + + output = &PutDeliveryDestinationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDeliveryDestination API operation for Amazon CloudWatch Logs. +// +// Creates or updates a logical delivery destination. A delivery destination +// is an Amazon Web Services resource that represents an Amazon Web Services +// service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose +// are supported as logs delivery destinations. +// +// To configure logs delivery between a supported Amazon Web Services service +// and a destination, you must do the following: +// +// - Create a delivery source, which is a logical object that represents +// the resource that is actually sending the logs. For more information, +// see PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html). +// +// - Use PutDeliveryDestination to create a delivery destination, which is +// a logical object that represents the actual delivery destination. +// +// - If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) +// in the destination account to assign an IAM policy to the destination. 
+// This policy allows delivery to that destination. +// +// - Use CreateDelivery to create a delivery by pairing exactly one delivery +// source and one delivery destination. For more information, see CreateDelivery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// You can configure a single delivery source to send logs to multiple destinations +// by creating multiple deliveries. You can also create multiple deliveries +// to configure multiple delivery sources to send logs to the same delivery +// destination. +// +// Only some Amazon Web Services services support being configured as a delivery +// source. These services are listed as Supported [V2 Permissions] in the table +// at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// If you use this operation to update an existing delivery destination, all +// the current delivery destination parameters are overwritten with the new +// parameter values that you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDeliveryDestination for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ThrottlingException +// The request was throttled because of quota limits. +// +// - ResourceNotFoundException +// The specified resource does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliveryDestination +func (c *CloudWatchLogs) PutDeliveryDestination(input *PutDeliveryDestinationInput) (*PutDeliveryDestinationOutput, error) { + req, out := c.PutDeliveryDestinationRequest(input) + return out, req.Send() +} + +// PutDeliveryDestinationWithContext is the same as PutDeliveryDestination with the addition of +// the ability to pass a context and additional request options. +// +// See PutDeliveryDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDeliveryDestinationWithContext(ctx aws.Context, input *PutDeliveryDestinationInput, opts ...request.Option) (*PutDeliveryDestinationOutput, error) { + req, out := c.PutDeliveryDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDeliveryDestinationPolicy = "PutDeliveryDestinationPolicy" + +// PutDeliveryDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDeliveryDestinationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDeliveryDestinationPolicy for more information on using the PutDeliveryDestinationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the PutDeliveryDestinationPolicyRequest method. +// req, resp := client.PutDeliveryDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliveryDestinationPolicy +func (c *CloudWatchLogs) PutDeliveryDestinationPolicyRequest(input *PutDeliveryDestinationPolicyInput) (req *request.Request, output *PutDeliveryDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDeliveryDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDeliveryDestinationPolicyInput{} + } + + output = &PutDeliveryDestinationPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDeliveryDestinationPolicy API operation for Amazon CloudWatch Logs. +// +// Creates and assigns an IAM policy that grants permissions to CloudWatch Logs +// to deliver logs cross-account to a specified destination in this account. +// To configure the delivery of logs from an Amazon Web Services service in +// another account to a logs delivery destination in the current account, you +// must do the following: +// +// - Create a delivery source, which is a logical object that represents +// the resource that is actually sending the logs. For more information, +// see PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html). +// +// - Create a delivery destination, which is a logical object that represents +// the actual delivery destination. For more information, see PutDeliveryDestination +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html). +// +// - Use this operation in the destination account to assign an IAM policy +// to the destination. This policy allows delivery to that destination. 
+// +// - Create a delivery by pairing exactly one delivery source and one delivery +// destination. For more information, see CreateDelivery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// Only some Amazon Web Services services support being configured as a delivery +// source. These services are listed as Supported [V2 Permissions] in the table +// at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// The contents of the policy must include two statements. One statement enables +// general logs delivery, and the other allows delivery to the chosen destination. +// See the examples for the needed policies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDeliveryDestinationPolicy for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliveryDestinationPolicy +func (c *CloudWatchLogs) PutDeliveryDestinationPolicy(input *PutDeliveryDestinationPolicyInput) (*PutDeliveryDestinationPolicyOutput, error) { + req, out := c.PutDeliveryDestinationPolicyRequest(input) + return out, req.Send() +} + +// PutDeliveryDestinationPolicyWithContext is the same as PutDeliveryDestinationPolicy with the addition of +// the ability to pass a context and additional request options. 
+// +// See PutDeliveryDestinationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDeliveryDestinationPolicyWithContext(ctx aws.Context, input *PutDeliveryDestinationPolicyInput, opts ...request.Option) (*PutDeliveryDestinationPolicyOutput, error) { + req, out := c.PutDeliveryDestinationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDeliverySource = "PutDeliverySource" + +// PutDeliverySourceRequest generates a "aws/request.Request" representing the +// client's request for the PutDeliverySource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDeliverySource for more information on using the PutDeliverySource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDeliverySourceRequest method. 
+//	req, resp := client.PutDeliverySourceRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliverySource
+func (c *CloudWatchLogs) PutDeliverySourceRequest(input *PutDeliverySourceInput) (req *request.Request, output *PutDeliverySourceOutput) {
+	op := &request.Operation{
+		Name:       opPutDeliverySource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDeliverySourceInput{}
+	}
+
+	output = &PutDeliverySourceOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// PutDeliverySource API operation for Amazon CloudWatch Logs.
+//
+// Creates or updates a logical delivery source. A delivery source represents
+// an Amazon Web Services resource that sends logs to a logs delivery destination.
+// The destination can be CloudWatch Logs, Amazon S3, or Firehose.
+//
+// To configure logs delivery between a delivery destination and an Amazon Web
+// Services service that is supported as a delivery source, you must do the
+// following:
+//
+//   - Use PutDeliverySource to create a delivery source, which is a logical
+//     object that represents the resource that is actually sending the logs.
+//
+//   - Use PutDeliveryDestination to create a delivery destination, which is
+//     a logical object that represents the actual delivery destination. For
+//     more information, see PutDeliveryDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
+//
+//   - If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy
+//     (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html)
+//     in the destination account to assign an IAM policy to the destination.
+//     This policy allows delivery to that destination.
+// +// - Use CreateDelivery to create a delivery by pairing exactly one delivery +// source and one delivery destination. For more information, see CreateDelivery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// You can configure a single delivery source to send logs to multiple destinations +// by creating multiple deliveries. You can also create multiple deliveries +// to configure multiple delivery sources to send logs to the same delivery +// destination. +// +// Only some Amazon Web Services services support being configured as a delivery +// source. These services are listed as Supported [V2 Permissions] in the table +// at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// If you use this operation to update an existing delivery source, all the +// current delivery source parameters are overwritten with the new parameter +// values that you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDeliverySource for usage and error information. +// +// Returned Error Types: +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - ConflictException +// This operation attempted to create a resource that already exists. +// +// - ValidationException +// One of the parameters for the request is not valid. +// +// - ServiceQuotaExceededException +// This request exceeds a service quota. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ThrottlingException +// The request was throttled because of quota limits. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDeliverySource +func (c *CloudWatchLogs) PutDeliverySource(input *PutDeliverySourceInput) (*PutDeliverySourceOutput, error) { + req, out := c.PutDeliverySourceRequest(input) + return out, req.Send() +} + +// PutDeliverySourceWithContext is the same as PutDeliverySource with the addition of +// the ability to pass a context and additional request options. +// +// See PutDeliverySource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDeliverySourceWithContext(ctx aws.Context, input *PutDeliverySourceInput, opts ...request.Option) (*PutDeliverySourceOutput, error) { + req, out := c.PutDeliverySourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDestination = "PutDestination" + +// PutDestinationRequest generates a "aws/request.Request" representing the +// client's request for the PutDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDestination for more information on using the PutDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDestinationRequest method. 
+// req, resp := client.PutDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestination +func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { + op := &request.Operation{ + Name: opPutDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationInput{} + } + + output = &PutDestinationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDestination API operation for Amazon CloudWatch Logs. +// +// Creates or updates a destination. This operation is used only to create destinations +// for cross-account subscriptions. +// +// A destination encapsulates a physical resource (such as an Amazon Kinesis +// stream). With a destination, you can subscribe to a real-time stream of log +// events for a different account, ingested using PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). +// +// Through an access policy, a destination controls what is written to it. By +// default, PutDestination does not set any access policy with the destination, +// which means a cross-account user cannot call PutSubscriptionFilter (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html) +// against this destination. To enable this, the destination owner must call +// PutDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html) +// after PutDestination. +// +// To perform a PutDestination operation, you must also have the iam:PassRole +// permission. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestination for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestination +func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + return out, req.Send() +} + +// PutDestinationWithContext is the same as PutDestination with the addition of +// the ability to pass a context and additional request options. +// +// See PutDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDestinationWithContext(ctx aws.Context, input *PutDestinationInput, opts ...request.Option) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDestinationPolicy = "PutDestinationPolicy" + +// PutDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDestinationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See PutDestinationPolicy for more information on using the PutDestinationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDestinationPolicyRequest method. +// req, resp := client.PutDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestinationPolicy +func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationPolicyInput{} + } + + output = &PutDestinationPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutDestinationPolicy API operation for Amazon CloudWatch Logs. +// +// Creates or updates an access policy associated with an existing destination. +// An access policy is an IAM policy document (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) +// that is used to authorize claims to register a subscription filter against +// a given destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestinationPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. 
+// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestinationPolicy +func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + return out, req.Send() +} + +// PutDestinationPolicyWithContext is the same as PutDestinationPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutDestinationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDestinationPolicyWithContext(ctx aws.Context, input *PutDestinationPolicyInput, opts ...request.Option) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutLogEvents = "PutLogEvents" + +// PutLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutLogEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLogEvents for more information on using the PutLogEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// // Example sending a request using the PutLogEventsRequest method. +// req, resp := client.PutLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutLogEvents +func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) { + op := &request.Operation{ + Name: opPutLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLogEventsInput{} + } + + output = &PutLogEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutLogEvents API operation for Amazon CloudWatch Logs. +// +// Uploads a batch of log events to the specified log stream. +// +// The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions +// are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException +// even if the sequence token is not valid. You can use parallel PutLogEvents +// actions on the same log stream. +// +// The batch of events must satisfy the following constraints: +// +// - The maximum batch size is 1,048,576 bytes. This size is calculated as +// the sum of all event messages in UTF-8, plus 26 bytes for each log event. +// +// - None of the log events in the batch can be more than 2 hours in the +// future. +// +// - None of the log events in the batch can be more than 14 days in the +// past. Also, none of the log events can be from earlier than the retention +// period of the log group. +// +// - The log events in the batch must be in chronological order by their +// timestamp. The timestamp is the time that the event occurred, expressed +// as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. 
(In Amazon +// Web Services Tools for PowerShell and the Amazon Web Services SDK for +// .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. +// For example, 2017-09-15T13:45:30.) +// +// - A batch of log events in a single request cannot span more than 24 hours. +// Otherwise, the operation fails. +// +// - Each log event can be no larger than 256 KB. +// +// - The maximum number of log events in a batch is 10,000. +// +// - The quota of five requests per second per log stream has been removed. +// Instead, PutLogEvents actions are throttled based on a per-second per-account +// quota. You can request an increase to the per-second throttling quota +// by using the Service Quotas service. +// +// If a call to PutLogEvents returns "UnrecognizedClientException" the most +// likely cause is a non-valid Amazon Web Services access key ID or secret key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutLogEvents for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - InvalidSequenceTokenException +// The sequence token is not valid. You can get the correct sequence token in +// the expectedSequenceToken field in the InvalidSequenceTokenException message. +// +// PutLogEvents actions are now always accepted and never return InvalidSequenceTokenException +// regardless of receiving an invalid sequence token. +// +// - DataAlreadyAcceptedException +// The event was already logged. +// +// PutLogEvents actions are now always accepted and never return DataAlreadyAcceptedException +// regardless of whether a given batch of log events has already been accepted. +// +// - ResourceNotFoundException +// The specified resource does not exist. 
+// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - UnrecognizedClientException +// The most likely cause is an Amazon Web Services access key ID or secret key +// that's not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutLogEvents +func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { + req, out := c.PutLogEventsRequest(input) + return out, req.Send() +} + +// PutLogEventsWithContext is the same as PutLogEvents with the addition of +// the ability to pass a context and additional request options. +// +// See PutLogEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutLogEventsWithContext(ctx aws.Context, input *PutLogEventsInput, opts ...request.Option) (*PutLogEventsOutput, error) { + req, out := c.PutLogEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutMetricFilter = "PutMetricFilter" + +// PutMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricFilter for more information on using the PutMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//	// Example sending a request using the PutMetricFilterRequest method.
+//	req, resp := client.PutMetricFilterRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutMetricFilter
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) {
+	op := &request.Operation{
+		Name:       opPutMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutMetricFilterInput{}
+	}
+
+	output = &PutMetricFilterOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutMetricFilter API operation for Amazon CloudWatch Logs.
+//
+// Creates or updates a metric filter and associates it with the specified log
+// group. With metric filters, you can configure rules to extract metric data
+// from log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
+//
+// The maximum number of metric filters that can be associated with a log group
+// is 100.
+//
+// Using regular expressions to create metric filters is supported. For these
+// filters, there is a quota of two regular expression patterns within
+// a single filter pattern. There is also a quota of five regular expression
+// patterns per log group. For more information about using regular expressions
+// in metric filters, see Filter pattern syntax for metric filters, subscription
+// filters, filter log events, and Live Tail (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
+//
+// When you create a metric filter, you can also optionally assign a unit and
+// dimensions to the metric that is created.
+// +// Metrics extracted from log events are charged as custom metrics. To prevent +// unexpected high charges, do not specify high-cardinality fields such as IPAddress +// or requestID as dimensions. Each different value found for a dimension is +// treated as a separate metric and accrues charges as a separate custom metric. +// +// CloudWatch Logs might disable a metric filter if it generates 1,000 different +// name/value pairs for your specified dimensions within one hour. +// +// You can also set up a billing alarm to alert you if your charges are higher +// than expected. For more information, see Creating a Billing Alarm to Monitor +// Your Estimated Amazon Web Services Charges (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutMetricFilter for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutMetricFilter +func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + return out, req.Send() +} + +// PutMetricFilterWithContext is the same as PutMetricFilter with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutMetricFilterWithContext(ctx aws.Context, input *PutMetricFilterInput, opts ...request.Option) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutQueryDefinition = "PutQueryDefinition" + +// PutQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the PutQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutQueryDefinition for more information on using the PutQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutQueryDefinitionRequest method. 
+// req, resp := client.PutQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinitionRequest(input *PutQueryDefinitionInput) (req *request.Request, output *PutQueryDefinitionOutput) { + op := &request.Operation{ + Name: opPutQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutQueryDefinitionInput{} + } + + output = &PutQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Creates or updates a query definition for CloudWatch Logs Insights. For more +// information, see Analyzing Log Data with CloudWatch Logs Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html). +// +// To update a query definition, specify its queryDefinitionId in your request. +// The values of name, queryString, and logGroupNames are changed to the values +// that you specify in your update operation. No current values are retained +// from the current query definition. For example, imagine updating a current +// query definition that includes log groups. If you don't specify the logGroupNames +// parameter in your update operation, the query definition changes to contain +// no log groups. +// +// You must have the logs:PutQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutQueryDefinition for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. 
+// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinition(input *PutQueryDefinitionInput) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + return out, req.Send() +} + +// PutQueryDefinitionWithContext is the same as PutQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See PutQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutQueryDefinitionWithContext(ctx aws.Context, input *PutQueryDefinitionInput, opts ...request.Option) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutResourcePolicy = "PutResourcePolicy" + +// PutResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutResourcePolicy for more information on using the PutResourcePolicy +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutResourcePolicyRequest method. +// req, resp := client.PutResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutResourcePolicy +func (c *CloudWatchLogs) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { + op := &request.Operation{ + Name: opPutResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutResourcePolicyInput{} + } + + output = &PutResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutResourcePolicy API operation for Amazon CloudWatch Logs. +// +// Creates or updates a resource policy allowing other Amazon Web Services services +// to put log events to this account, such as Amazon Route 53. An account can +// have up to 10 resource policies per Amazon Web Services Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutResourcePolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutResourcePolicy +func (c *CloudWatchLogs) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + return out, req.Send() +} + +// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutRetentionPolicy for more information on using the PutRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutRetentionPolicyRequest method. 
+// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutRetentionPolicy +func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + output = &PutRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Sets the retention of the specified log group. With a retention policy, you +// can configure the number of days for which to retain log events in the specified +// log group. +// +// CloudWatch Logs doesn’t immediately delete log events when they reach their +// retention setting. It typically takes up to 72 hours after that before log +// events are deleted, but in rare situations might take longer. +// +// To illustrate, imagine that you change a log group to have a longer retention +// setting when it contains log events that are past the expiration date, but +// haven’t been deleted. Those log events will take up to 72 hours to be deleted +// after the new retention date is reached. To make sure that log data is deleted +// permanently, keep a log group at its lower retention setting until 72 hours +// after the previous retention period ends. Alternatively, wait to change the +// retention setting until you confirm that the earlier log events are deleted. +// +// When log events reach their retention setting they are marked for deletion. 
+// After they are marked for deletion, they do not add to your archival storage +// costs anymore, even if they are not actually deleted until later. These log +// events marked for deletion are also not included when you use an API to retrieve +// the storedBytes value to see how many bytes a log group is storing. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutRetentionPolicy for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutRetentionPolicy +func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + return out, req.Send() +} + +// PutRetentionPolicyWithContext is the same as PutRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) PutRetentionPolicyWithContext(ctx aws.Context, input *PutRetentionPolicyInput, opts ...request.Option) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutSubscriptionFilter = "PutSubscriptionFilter" + +// PutSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutSubscriptionFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutSubscriptionFilter for more information on using the PutSubscriptionFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutSubscriptionFilterRequest method. 
+// req, resp := client.PutSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutSubscriptionFilter +func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opPutSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutSubscriptionFilterInput{} + } + + output = &PutSubscriptionFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Creates or updates a subscription filter and associates it with the specified +// log group. With subscription filters, you can subscribe to a real-time stream +// of log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) +// and have them delivered to a specific destination. When log events are sent +// to the receiving service, they are Base64 encoded and compressed with the +// GZIP format. +// +// The following destinations are supported for subscription filters: +// +// - An Amazon Kinesis data stream belonging to the same account as the subscription +// filter, for same-account delivery. +// +// - A logical destination created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) +// that belongs to a different account, for cross-account delivery. We currently +// support Kinesis Data Streams and Firehose as logical destinations. +// +// - An Amazon Kinesis Data Firehose delivery stream that belongs to the +// same account as the subscription filter, for same-account delivery. 
+//
+// - A Lambda function that belongs to the same account as the subscription
+// filter, for same-account delivery.
+//
+// Each log group can have up to two subscription filters associated with it.
+// If you are updating an existing filter, you must specify the correct name
+// in filterName.
+//
+// Using regular expressions to create subscription filters is supported. For
+// these filters, there is a quota of two regular expression patterns
+// within a single filter pattern. There is also a quota of five regular expression
+// patterns per log group. For more information about using regular expressions
+// in subscription filters, see Filter pattern syntax for metric filters, subscription
+// filters, filter log events, and Live Tail (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
+//
+// To perform a PutSubscriptionFilter operation for any destination except a
+// Lambda function, you must also have the iam:PassRole permission.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation PutSubscriptionFilter for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// - ResourceNotFoundException
+// The specified resource does not exist.
+//
+// - OperationAbortedException
+// Multiple concurrent requests to update the same resource were in conflict.
+//
+// - LimitExceededException
+// You have reached the maximum number of resources that can be created.
+//
+// - ServiceUnavailableException
+// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutSubscriptionFilter +func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + return out, req.Send() +} + +// PutSubscriptionFilterWithContext is the same as PutSubscriptionFilter with the addition of +// the ability to pass a context and additional request options. +// +// See PutSubscriptionFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutSubscriptionFilterWithContext(ctx aws.Context, input *PutSubscriptionFilterInput, opts ...request.Option) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartLiveTail = "StartLiveTail" + +// StartLiveTailRequest generates a "aws/request.Request" representing the +// client's request for the StartLiveTail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLiveTail for more information on using the StartLiveTail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartLiveTailRequest method. 
+// req, resp := client.StartLiveTailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartLiveTail +func (c *CloudWatchLogs) StartLiveTailRequest(input *StartLiveTailInput) (req *request.Request, output *StartLiveTailOutput) { + op := &request.Operation{ + Name: opStartLiveTail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLiveTailInput{} + } + + output = &StartLiveTailOutput{} + req = c.newRequest(op, input, output) + + es := NewStartLiveTailEventStream() + output.eventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + es.output = output + req.Handlers.Unmarshal.PushBack(es.recvInitialEvent) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("streaming-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// StartLiveTail API operation for Amazon CloudWatch Logs. +// +// Starts a Live Tail streaming session for one or more log groups. A Live Tail +// session returns a stream of log events that have been recently ingested in +// the log groups. For more information, see Use Live Tail to view logs in near +// real time (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs_LiveTail.html). +// +// The response to this operation is a response stream, over which the server +// sends live log events and the client receives them. +// +// The following objects are sent over the stream: +// +// - A single LiveTailSessionStart (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_LiveTailSessionStart.html) +// object is sent at the start of the session. 
+// +// - Every second, a LiveTailSessionUpdate (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_LiveTailSessionUpdate.html) +// object is sent. Each of these objects contains an array of the actual +// log events. If no new log events were ingested in the past second, the +// LiveTailSessionUpdate object will contain an empty array. The array of +// log events contained in a LiveTailSessionUpdate can include as many as +// 500 log events. If the number of log events matching the request exceeds +// 500 per second, the log events are sampled down to 500 log events to be +// included in each LiveTailSessionUpdate object. If your client consumes +// the log events slower than the server produces them, CloudWatch Logs buffers +// up to 10 LiveTailSessionUpdate events or 5000 log events, after which +// it starts dropping the oldest events. +// +// - A SessionStreamingException (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionStreamingException) +// object is returned if an unknown error occurs on the server side. +// +// - A SessionTimeoutException (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionTimeoutException) +// object is returned when the session times out, after it has been kept +// open for three hours. +// +// You can end a session before it times out by closing the session stream or +// by closing the client that is receiving the stream. The session also ends +// if the established connection between the client and the server breaks. +// +// For examples of using an SDK to start a Live Tail session, see Start a Live +// Tail session using an Amazon Web Services SDK (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/example_cloudwatch-logs_StartLiveTail_section.html). 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation StartLiveTail for usage and error information. +// +// Returned Error Types: +// +// - AccessDeniedException +// You don't have sufficient permissions to perform this action. +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - InvalidOperationException +// The operation is not valid on the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartLiveTail +func (c *CloudWatchLogs) StartLiveTail(input *StartLiveTailInput) (*StartLiveTailOutput, error) { + req, out := c.StartLiveTailRequest(input) + return out, req.Send() +} + +// StartLiveTailWithContext is the same as StartLiveTail with the addition of +// the ability to pass a context and additional request options. +// +// See StartLiveTail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) StartLiveTailWithContext(ctx aws.Context, input *StartLiveTailInput, opts ...request.Option) (*StartLiveTailOutput, error) { + req, out := c.StartLiveTailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +var _ awserr.Error +var _ time.Time + +// StartLiveTailEventStream provides the event stream handling for the StartLiveTail. 
+// +// For testing and mocking the event stream this type should be initialized via +// the NewStartLiveTailEventStream constructor function. Using the functional options +// to pass in nested mock behavior. +type StartLiveTailEventStream struct { + + // Reader is the EventStream reader for the StartLiveTailResponseStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader StartLiveTailResponseStreamReader + + outputReader io.ReadCloser + output *StartLiveTailOutput + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// NewStartLiveTailEventStream initializes an StartLiveTailEventStream. +// This function should only be used for testing and mocking the StartLiveTailEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// es := NewStartLiveTailEventStream(func(o *StartLiveTailEventStream){ +// es.Reader = myMockStreamReader +// }) +func NewStartLiveTailEventStream(opts ...func(*StartLiveTailEventStream)) *StartLiveTailEventStream { + es := &StartLiveTailEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +func (es *StartLiveTailEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *StartLiveTailEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case 
<-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +type eventTypeForStartLiveTailEventStreamOutputEvent struct { + unmarshalerForEvent func(string) (eventstreamapi.Unmarshaler, error) + output *StartLiveTailOutput +} + +func (e eventTypeForStartLiveTailEventStreamOutputEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + if eventType == "initial-response" { + return e.output, nil + } + return e.unmarshalerForEvent(eventType) +} + +// Events returns a channel to read events from. +// +// These events are: +// +// - LiveTailSessionStart +// - LiveTailSessionUpdate +// - StartLiveTailResponseStreamUnknownEvent +func (es *StartLiveTailEventStream) Events() <-chan StartLiveTailResponseStreamEvent { + return es.Reader.Events() +} + +func (es *StartLiveTailEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForStartLiveTailResponseStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + unmarshalerForEvent = eventTypeForStartLiveTailEventStreamOutputEvent{ + unmarshalerForEvent: unmarshalerForEvent, + output: es.output, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) 
+ eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadStartLiveTailResponseStream(eventReader) +} +func (es *StartLiveTailEventStream) recvInitialEvent(r *request.Request) { + // Wait for the initial response event, which must be the first + // event to be received from the API. + select { + case event, ok := <-es.Events(): + if !ok { + return + } + + v, ok := event.(*StartLiveTailOutput) + if !ok || v == nil { + r.Error = awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("invalid event, %T, expect %T, %v", + event, (*StartLiveTailOutput)(nil), v), + nil, + ) + return + } + + *es.output = *v + es.output.eventStream = es + } +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +func (es *StartLiveTailEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *StartLiveTailEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *StartLiveTailEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opStartQuery = "StartQuery" + +// StartQueryRequest generates a "aws/request.Request" representing the +// client's request for the StartQuery operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartQuery for more information on using the StartQuery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartQueryRequest method. +// req, resp := client.StartQueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartQuery +func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request.Request, output *StartQueryOutput) { + op := &request.Operation{ + Name: opStartQuery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartQueryInput{} + } + + output = &StartQueryOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartQuery API operation for Amazon CloudWatch Logs. +// +// Schedules a query of a log group using CloudWatch Logs Insights. You specify +// the log group and time range to query and the query string to use. +// +// For more information, see CloudWatch Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). +// +// After you run a query using StartQuery, the query results are stored by CloudWatch +// Logs. You can use GetQueryResults (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) +// to retrieve the results of a query, using the queryId that StartQuery returns. 
+// +// If you have associated a KMS key with the query results in this account, +// then StartQuery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) +// uses that key to encrypt the results when it stores them. If no key is associated +// with query results, the query results are encrypted with the default CloudWatch +// Logs encryption method. +// +// Queries time out after 60 minutes of runtime. If your queries are timing +// out, reduce the time range being searched or partition your query into a +// number of queries. +// +// If you are using CloudWatch cross-account observability, you can use this +// operation in a monitoring account to start a query in a linked source account. +// For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// For a cross-account StartQuery operation, the query definition must be defined +// in the monitoring account. +// +// You can have up to 30 concurrent CloudWatch Logs insights queries, including +// queries that have been added to dashboards. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation StartQuery for usage and error information. +// +// Returned Error Types: +// +// - MalformedQueryException +// The query string is not valid. Details about this error are displayed in +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). +// +// For more information about valid query syntax, see CloudWatch Logs Insights +// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
+// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartQuery +func (c *CloudWatchLogs) StartQuery(input *StartQueryInput) (*StartQueryOutput, error) { + req, out := c.StartQueryRequest(input) + return out, req.Send() +} + +// StartQueryWithContext is the same as StartQuery with the addition of +// the ability to pass a context and additional request options. +// +// See StartQuery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) StartQueryWithContext(ctx aws.Context, input *StartQueryInput, opts ...request.Option) (*StartQueryOutput, error) { + req, out := c.StartQueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopQuery = "StopQuery" + +// StopQueryRequest generates a "aws/request.Request" representing the +// client's request for the StopQuery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopQuery for more information on using the StopQuery +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StopQueryRequest method. +// req, resp := client.StopQueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StopQuery +func (c *CloudWatchLogs) StopQueryRequest(input *StopQueryInput) (req *request.Request, output *StopQueryOutput) { + op := &request.Operation{ + Name: opStopQuery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopQueryInput{} + } + + output = &StopQueryOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopQuery API operation for Amazon CloudWatch Logs. +// +// Stops a CloudWatch Logs Insights query that is in progress. If the query +// has already ended, the operation returns an error indicating that the specified +// query is not running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation StopQuery for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StopQuery +func (c *CloudWatchLogs) StopQuery(input *StopQueryInput) (*StopQueryOutput, error) { + req, out := c.StopQueryRequest(input) + return out, req.Send() +} + +// StopQueryWithContext is the same as StopQuery with the addition of +// the ability to pass a context and additional request options. +// +// See StopQuery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) StopQueryWithContext(ctx aws.Context, input *StopQueryInput, opts ...request.Option) (*StopQueryOutput, error) { + req, out := c.StopQueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagLogGroup = "TagLogGroup" + +// TagLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the TagLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagLogGroup for more information on using the TagLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagLogGroupRequest method. 
+// req, resp := client.TagLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagLogGroup +// +// Deprecated: Please use the generic tagging API TagResource +func (c *CloudWatchLogs) TagLogGroupRequest(input *TagLogGroupInput) (req *request.Request, output *TagLogGroupOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, TagLogGroup, has been deprecated") + } + op := &request.Operation{ + Name: opTagLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagLogGroupInput{} + } + + output = &TagLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagLogGroup API operation for Amazon CloudWatch Logs. +// +// The TagLogGroup operation is on the path to deprecation. We recommend that +// you use TagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html) +// instead. +// +// Adds or updates the specified tags for the specified log group. +// +// To list the tags for a log group, use ListTagsForResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). +// To remove tags, use UntagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html). +// +// For more information about tags, see Tag Log Groups in Amazon CloudWatch +// Logs (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) +// in the Amazon CloudWatch Logs User Guide. +// +// CloudWatch Logs doesn’t support IAM policies that prevent users from assigning +// specified tags to log groups using the aws:Resource/key-name or aws:TagKeys +// condition keys. 
For more information about using tags to control access, +// see Controlling access to Amazon Web Services resources using tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TagLogGroup for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagLogGroup +// +// Deprecated: Please use the generic tagging API TagResource +func (c *CloudWatchLogs) TagLogGroup(input *TagLogGroupInput) (*TagLogGroupOutput, error) { + req, out := c.TagLogGroupRequest(input) + return out, req.Send() +} + +// TagLogGroupWithContext is the same as TagLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See TagLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: Please use the generic tagging API TagResource +func (c *CloudWatchLogs) TagLogGroupWithContext(ctx aws.Context, input *TagLogGroupInput, opts ...request.Option) (*TagLogGroupOutput, error) { + req, out := c.TagLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagResource +func (c *CloudWatchLogs) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon CloudWatch Logs. +// +// Assigns one or more tags (key-value pairs) to the specified CloudWatch Logs +// resource. Currently, the only CloudWatch Logs resources that can be tagged +// are log groups and destinations. +// +// Tags can help you organize and categorize your resources. You can also use +// them to scope user permissions by granting a user permission to access or +// change only resources with certain tag values. 
+// +// Tags don't have any semantic meaning to Amazon Web Services and are interpreted +// strictly as strings of characters. +// +// You can use the TagResource action with a resource that already has tags. +// If you specify a new tag key for the alarm, this tag is appended to the list +// of tags associated with the alarm. If you specify a tag key that is already +// associated with the alarm, the new tag value that you specify replaces the +// previous value for that tag. +// +// You can associate as many as 50 tags with a CloudWatch Logs resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - TooManyTagsException +// A resource can have no more than 50 tags. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagResource +func (c *CloudWatchLogs) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTestMetricFilter = "TestMetricFilter" + +// TestMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the TestMetricFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestMetricFilter for more information on using the TestMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TestMetricFilterRequest method. +// req, resp := client.TestMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TestMetricFilter +func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { + op := &request.Operation{ + Name: opTestMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestMetricFilterInput{} + } + + output = &TestMetricFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestMetricFilter API operation for Amazon CloudWatch Logs. +// +// Tests the filter pattern of a metric filter against a sample of log event +// messages. You can use this operation to validate the correctness of a metric +// filter pattern. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TestMetricFilter for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TestMetricFilter +func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + return out, req.Send() +} + +// TestMetricFilterWithContext is the same as TestMetricFilter with the addition of +// the ability to pass a context and additional request options. +// +// See TestMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) TestMetricFilterWithContext(ctx aws.Context, input *TestMetricFilterInput, opts ...request.Option) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagLogGroup = "UntagLogGroup" + +// UntagLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the UntagLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UntagLogGroup for more information on using the UntagLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagLogGroupRequest method. +// req, resp := client.UntagLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagLogGroup +// +// Deprecated: Please use the generic tagging API UntagResource +func (c *CloudWatchLogs) UntagLogGroupRequest(input *UntagLogGroupInput) (req *request.Request, output *UntagLogGroupOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, UntagLogGroup, has been deprecated") + } + op := &request.Operation{ + Name: opUntagLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagLogGroupInput{} + } + + output = &UntagLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagLogGroup API operation for Amazon CloudWatch Logs. +// +// The UntagLogGroup operation is on the path to deprecation. We recommend that +// you use UntagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html) +// instead. +// +// Removes the specified tags from the specified log group. +// +// To list the tags for a log group, use ListTagsForResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). +// To add tags, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html). 
+// +// CloudWatch Logs doesn’t support IAM policies that prevent users from assigning +// specified tags to log groups using the aws:Resource/key-name or aws:TagKeys +// condition keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation UntagLogGroup for usage and error information. +// +// Returned Error Types: +// - ResourceNotFoundException +// The specified resource does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagLogGroup +// +// Deprecated: Please use the generic tagging API UntagResource +func (c *CloudWatchLogs) UntagLogGroup(input *UntagLogGroupInput) (*UntagLogGroupOutput, error) { + req, out := c.UntagLogGroupRequest(input) + return out, req.Send() +} + +// UntagLogGroupWithContext is the same as UntagLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UntagLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: Please use the generic tagging API UntagResource +func (c *CloudWatchLogs) UntagLogGroupWithContext(ctx aws.Context, input *UntagLogGroupInput, opts ...request.Option) (*UntagLogGroupOutput, error) { + req, out := c.UntagLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagResource +func (c *CloudWatchLogs) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon CloudWatch Logs. +// +// Removes one or more tags from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. 
+// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagResource +func (c *CloudWatchLogs) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAnomaly = "UpdateAnomaly" + +// UpdateAnomalyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAnomaly operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAnomaly for more information on using the UpdateAnomaly +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateAnomalyRequest method. 
+// req, resp := client.UpdateAnomalyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UpdateAnomaly +func (c *CloudWatchLogs) UpdateAnomalyRequest(input *UpdateAnomalyInput) (req *request.Request, output *UpdateAnomalyOutput) { + op := &request.Operation{ + Name: opUpdateAnomaly, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAnomalyInput{} + } + + output = &UpdateAnomalyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAnomaly API operation for Amazon CloudWatch Logs. +// +// Use this operation to suppress anomaly detection for a specified anomaly +// or pattern. If you suppress an anomaly, CloudWatch Logs won’t report new +// occurrences of that anomaly and won't update that anomaly with new data. +// If you suppress a pattern, CloudWatch Logs won’t report any anomalies related +// to that pattern. +// +// You must specify either anomalyId or patternId, but you can't specify both +// parameters in the same operation. +// +// If you have previously used this operation to suppress detection of a pattern +// or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. +// To do this, use this operation and specify the anomaly or pattern to stop +// suppressing, and omit the suppressionType and suppressionPeriod parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation UpdateAnomaly for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. 
+// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UpdateAnomaly +func (c *CloudWatchLogs) UpdateAnomaly(input *UpdateAnomalyInput) (*UpdateAnomalyOutput, error) { + req, out := c.UpdateAnomalyRequest(input) + return out, req.Send() +} + +// UpdateAnomalyWithContext is the same as UpdateAnomaly with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAnomaly for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) UpdateAnomalyWithContext(ctx aws.Context, input *UpdateAnomalyInput, opts ...request.Option) (*UpdateAnomalyOutput, error) { + req, out := c.UpdateAnomalyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateLogAnomalyDetector = "UpdateLogAnomalyDetector" + +// UpdateLogAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLogAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateLogAnomalyDetector for more information on using the UpdateLogAnomalyDetector +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateLogAnomalyDetectorRequest method. +// req, resp := client.UpdateLogAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UpdateLogAnomalyDetector +func (c *CloudWatchLogs) UpdateLogAnomalyDetectorRequest(input *UpdateLogAnomalyDetectorInput) (req *request.Request, output *UpdateLogAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opUpdateLogAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLogAnomalyDetectorInput{} + } + + output = &UpdateLogAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateLogAnomalyDetector API operation for Amazon CloudWatch Logs. +// +// Updates an existing log anomaly detector. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation UpdateLogAnomalyDetector for usage and error information. +// +// Returned Error Types: +// +// - InvalidParameterException +// A parameter is specified incorrectly. +// +// - ResourceNotFoundException +// The specified resource does not exist. +// +// - ServiceUnavailableException +// The service cannot complete the request. +// +// - OperationAbortedException +// Multiple concurrent requests to update the same resource were in conflict. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UpdateLogAnomalyDetector +func (c *CloudWatchLogs) UpdateLogAnomalyDetector(input *UpdateLogAnomalyDetectorInput) (*UpdateLogAnomalyDetectorOutput, error) { + req, out := c.UpdateLogAnomalyDetectorRequest(input) + return out, req.Send() +} + +// UpdateLogAnomalyDetectorWithContext is the same as UpdateLogAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLogAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) UpdateLogAnomalyDetectorWithContext(ctx aws.Context, input *UpdateLogAnomalyDetectorInput, opts ...request.Option) (*UpdateLogAnomalyDetectorOutput, error) { + req, out := c.UpdateLogAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You don't have sufficient permissions to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A structure that contains information about one CloudWatch Logs account policy. +type AccountPolicy struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID that the policy applies to. + AccountId *string `locationName:"accountId" min:"12" type:"string"` + + // The date and time that this policy was most recently updated. + LastUpdatedTime *int64 `locationName:"lastUpdatedTime" type:"long"` + + // The policy document for this account policy. + // + // The JSON specified in policyDocument can be up to 30,720 characters. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The name of the account policy. + PolicyName *string `locationName:"policyName" type:"string"` + + // The type of policy for this account policy. 
+ PolicyType *string `locationName:"policyType" type:"string" enum:"PolicyType"` + + // The scope of the account policy. + Scope *string `locationName:"scope" type:"string" enum:"Scope"` + + // The log group selection criteria for this subscription filter policy. + SelectionCriteria *string `locationName:"selectionCriteria" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountPolicy) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountPolicy) SetAccountId(v string) *AccountPolicy { + s.AccountId = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *AccountPolicy) SetLastUpdatedTime(v int64) *AccountPolicy { + s.LastUpdatedTime = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *AccountPolicy) SetPolicyDocument(v string) *AccountPolicy { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AccountPolicy) SetPolicyName(v string) *AccountPolicy { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *AccountPolicy) SetPolicyType(v string) *AccountPolicy { + s.PolicyType = &v + return s +} + +// SetScope sets the Scope field's value. 
+func (s *AccountPolicy) SetScope(v string) *AccountPolicy { + s.Scope = &v + return s +} + +// SetSelectionCriteria sets the SelectionCriteria field's value. +func (s *AccountPolicy) SetSelectionCriteria(v string) *AccountPolicy { + s.SelectionCriteria = &v + return s +} + +// This structure represents one anomaly that has been found by a logs anomaly +// detector. +// +// For more information about patterns and anomalies, see CreateLogAnomalyDetector +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogAnomalyDetector.html). +type Anomaly struct { + _ struct{} `type:"structure"` + + // Specifies whether this anomaly is still ongoing. + // + // Active is a required field + Active *bool `locationName:"active" type:"boolean" required:"true"` + + // The ARN of the anomaly detector that identified this anomaly. + // + // AnomalyDetectorArn is a required field + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string" required:"true"` + + // The unique ID that CloudWatch Logs assigned to this anomaly. + // + // AnomalyId is a required field + AnomalyId *string `locationName:"anomalyId" min:"36" type:"string" required:"true"` + + // A human-readable description of the anomaly. This description is generated + // by CloudWatch Logs. + // + // Description is a required field + Description *string `locationName:"description" min:"1" type:"string" required:"true"` + + // The date and time when the anomaly detector first saw this anomaly. It is + // specified as epoch time, which is the number of seconds since January 1, + // 1970, 00:00:00 UTC. + // + // FirstSeen is a required field + FirstSeen *int64 `locationName:"firstSeen" type:"long" required:"true"` + + // A map showing times when the anomaly detector ran, and the number of occurrences + // of this anomaly that were detected at each of those runs. 
The times are specified + // in epoch time, which is the number of seconds since January 1, 1970, 00:00:00 + // UTC. + // + // Histogram is a required field + Histogram map[string]*int64 `locationName:"histogram" type:"map" required:"true"` + + // If this anomaly is suppressed, this field is true if the suppression is because + // the pattern is suppressed. If false, then only this particular anomaly is + // suppressed. + IsPatternLevelSuppression *bool `locationName:"isPatternLevelSuppression" type:"boolean"` + + // The date and time when the anomaly detector most recently saw this anomaly. + // It is specified as epoch time, which is the number of seconds since January + // 1, 1970, 00:00:00 UTC. + // + // LastSeen is a required field + LastSeen *int64 `locationName:"lastSeen" type:"long" required:"true"` + + // An array of ARNS of the log groups that contained log events considered to + // be part of this anomaly. + // + // LogGroupArnList is a required field + LogGroupArnList []*string `locationName:"logGroupArnList" type:"list" required:"true"` + + // An array of sample log event messages that are considered to be part of this + // anomaly. + // + // LogSamples is a required field + LogSamples []*LogEvent `locationName:"logSamples" type:"list" required:"true"` + + // The ID of the pattern used to help identify this anomaly. + // + // PatternId is a required field + PatternId *string `locationName:"patternId" min:"32" type:"string" required:"true"` + + // The pattern used to help identify this anomaly, in regular expression format. + PatternRegex *string `locationName:"patternRegex" min:"1" type:"string"` + + // The pattern used to help identify this anomaly, in string format. + // + // PatternString is a required field + PatternString *string `locationName:"patternString" min:"1" type:"string" required:"true"` + + // An array of structures where each structure contains information about one + // token that makes up the pattern. 
+ // + // PatternTokens is a required field + PatternTokens []*PatternToken `locationName:"patternTokens" type:"list" required:"true"` + + // The priority level of this anomaly, as determined by CloudWatch Logs. Priority + // is computed based on log severity labels such as FATAL and ERROR and the + // amount of deviation from the baseline. Possible values are HIGH, MEDIUM, + // and LOW. + Priority *string `locationName:"priority" min:"1" type:"string"` + + // Indicates the current state of this anomaly. If it is still being treated + // as an anomaly, the value is Active. If you have suppressed this anomaly by + // using the UpdateAnomaly (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateAnomaly.html) + // operation, the value is Suppressed. If this behavior is now considered to + // be normal, the value is Baseline. + // + // State is a required field + State *string `locationName:"state" type:"string" required:"true" enum:"State"` + + // Indicates whether this anomaly is currently suppressed. To suppress an anomaly, + // use UpdateAnomaly (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateAnomaly.html). + Suppressed *bool `locationName:"suppressed" type:"boolean"` + + // If the anomaly is suppressed, this indicates when it was suppressed. + SuppressedDate *int64 `locationName:"suppressedDate" type:"long"` + + // If the anomaly is suppressed, this indicates when the suppression will end. + // If this value is 0, the anomaly was suppressed with no expiration, with the + // INFINITE value. + SuppressedUntil *int64 `locationName:"suppressedUntil" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s Anomaly) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Anomaly) GoString() string { + return s.String() +} + +// SetActive sets the Active field's value. +func (s *Anomaly) SetActive(v bool) *Anomaly { + s.Active = &v + return s +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *Anomaly) SetAnomalyDetectorArn(v string) *Anomaly { + s.AnomalyDetectorArn = &v + return s +} + +// SetAnomalyId sets the AnomalyId field's value. +func (s *Anomaly) SetAnomalyId(v string) *Anomaly { + s.AnomalyId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Anomaly) SetDescription(v string) *Anomaly { + s.Description = &v + return s +} + +// SetFirstSeen sets the FirstSeen field's value. +func (s *Anomaly) SetFirstSeen(v int64) *Anomaly { + s.FirstSeen = &v + return s +} + +// SetHistogram sets the Histogram field's value. +func (s *Anomaly) SetHistogram(v map[string]*int64) *Anomaly { + s.Histogram = v + return s +} + +// SetIsPatternLevelSuppression sets the IsPatternLevelSuppression field's value. +func (s *Anomaly) SetIsPatternLevelSuppression(v bool) *Anomaly { + s.IsPatternLevelSuppression = &v + return s +} + +// SetLastSeen sets the LastSeen field's value. +func (s *Anomaly) SetLastSeen(v int64) *Anomaly { + s.LastSeen = &v + return s +} + +// SetLogGroupArnList sets the LogGroupArnList field's value. +func (s *Anomaly) SetLogGroupArnList(v []*string) *Anomaly { + s.LogGroupArnList = v + return s +} + +// SetLogSamples sets the LogSamples field's value. +func (s *Anomaly) SetLogSamples(v []*LogEvent) *Anomaly { + s.LogSamples = v + return s +} + +// SetPatternId sets the PatternId field's value. 
+func (s *Anomaly) SetPatternId(v string) *Anomaly { + s.PatternId = &v + return s +} + +// SetPatternRegex sets the PatternRegex field's value. +func (s *Anomaly) SetPatternRegex(v string) *Anomaly { + s.PatternRegex = &v + return s +} + +// SetPatternString sets the PatternString field's value. +func (s *Anomaly) SetPatternString(v string) *Anomaly { + s.PatternString = &v + return s +} + +// SetPatternTokens sets the PatternTokens field's value. +func (s *Anomaly) SetPatternTokens(v []*PatternToken) *Anomaly { + s.PatternTokens = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *Anomaly) SetPriority(v string) *Anomaly { + s.Priority = &v + return s +} + +// SetState sets the State field's value. +func (s *Anomaly) SetState(v string) *Anomaly { + s.State = &v + return s +} + +// SetSuppressed sets the Suppressed field's value. +func (s *Anomaly) SetSuppressed(v bool) *Anomaly { + s.Suppressed = &v + return s +} + +// SetSuppressedDate sets the SuppressedDate field's value. +func (s *Anomaly) SetSuppressedDate(v int64) *Anomaly { + s.SuppressedDate = &v + return s +} + +// SetSuppressedUntil sets the SuppressedUntil field's value. +func (s *Anomaly) SetSuppressedUntil(v int64) *Anomaly { + s.SuppressedUntil = &v + return s +} + +// Contains information about one anomaly detector in the account. +type AnomalyDetector struct { + _ struct{} `type:"structure"` + + // The ARN of the anomaly detector. + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string"` + + // Specifies the current status of the anomaly detector. To pause an anomaly + // detector, use the enabled parameter in the UpdateLogAnomalyDetector (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateLogAnomalyDetector.html) + // operation. + AnomalyDetectorStatus *string `locationName:"anomalyDetectorStatus" type:"string" enum:"AnomalyDetectorStatus"` + + // The number of days used as the life cycle of anomalies. 
After this time, + // anomalies are automatically baselined and the anomaly detector model will + // treat new occurrences of similar event as normal. + AnomalyVisibilityTime *int64 `locationName:"anomalyVisibilityTime" min:"7" type:"long"` + + // The date and time when this anomaly detector was created. + CreationTimeStamp *int64 `locationName:"creationTimeStamp" type:"long"` + + // The name of the anomaly detector. + DetectorName *string `locationName:"detectorName" min:"1" type:"string"` + + // Specifies how often the anomaly detector runs and look for anomalies. + EvaluationFrequency *string `locationName:"evaluationFrequency" type:"string" enum:"EvaluationFrequency"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The ID of the KMS key assigned to this anomaly detector, if any. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The date and time when this anomaly detector was most recently modified. + LastModifiedTimeStamp *int64 `locationName:"lastModifiedTimeStamp" type:"long"` + + // A list of the ARNs of the log groups that this anomaly detector watches. + LogGroupArnList []*string `locationName:"logGroupArnList" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetector) GoString() string { + return s.String() +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *AnomalyDetector) SetAnomalyDetectorArn(v string) *AnomalyDetector { + s.AnomalyDetectorArn = &v + return s +} + +// SetAnomalyDetectorStatus sets the AnomalyDetectorStatus field's value. +func (s *AnomalyDetector) SetAnomalyDetectorStatus(v string) *AnomalyDetector { + s.AnomalyDetectorStatus = &v + return s +} + +// SetAnomalyVisibilityTime sets the AnomalyVisibilityTime field's value. +func (s *AnomalyDetector) SetAnomalyVisibilityTime(v int64) *AnomalyDetector { + s.AnomalyVisibilityTime = &v + return s +} + +// SetCreationTimeStamp sets the CreationTimeStamp field's value. +func (s *AnomalyDetector) SetCreationTimeStamp(v int64) *AnomalyDetector { + s.CreationTimeStamp = &v + return s +} + +// SetDetectorName sets the DetectorName field's value. +func (s *AnomalyDetector) SetDetectorName(v string) *AnomalyDetector { + s.DetectorName = &v + return s +} + +// SetEvaluationFrequency sets the EvaluationFrequency field's value. +func (s *AnomalyDetector) SetEvaluationFrequency(v string) *AnomalyDetector { + s.EvaluationFrequency = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *AnomalyDetector) SetFilterPattern(v string) *AnomalyDetector { + s.FilterPattern = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AnomalyDetector) SetKmsKeyId(v string) *AnomalyDetector { + s.KmsKeyId = &v + return s +} + +// SetLastModifiedTimeStamp sets the LastModifiedTimeStamp field's value. +func (s *AnomalyDetector) SetLastModifiedTimeStamp(v int64) *AnomalyDetector { + s.LastModifiedTimeStamp = &v + return s +} + +// SetLogGroupArnList sets the LogGroupArnList field's value. 
+func (s *AnomalyDetector) SetLogGroupArnList(v []*string) *AnomalyDetector { + s.LogGroupArnList = v + return s +} + +type AssociateKmsKeyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the KMS key to use when encrypting log + // data. This must be a symmetric KMS key. For more information, see Amazon + // Resource Names (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // and Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). + // + // KmsKeyId is a required field + KmsKeyId *string `locationName:"kmsKeyId" type:"string" required:"true"` + + // The name of the log group. + // + // In your AssociateKmsKey operation, you must specify either the resourceIdentifier + // parameter or the logGroup parameter, but you can't specify both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Specifies the target for this operation. You must specify one of the following: + // + // * Specify the following ARN to have future GetQueryResults (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) + // operations in this account encrypt the results with the specified KMS + // key. Replace REGION and ACCOUNT_ID with your Region and account ID. arn:aws:logs:REGION:ACCOUNT_ID:query-result:* + // + // * Specify the ARN of a log group to have CloudWatch Logs use the KMS key + // to encrypt log events that are ingested and stored by that log group. + // The log group ARN must be in the following format. Replace REGION and + // ACCOUNT_ID with your Region and account ID. arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME + // + // In your AssociateKmsKey operation, you must specify either the resourceIdentifier + // parameter or the logGroup parameter, but you can't specify both. 
+ ResourceIdentifier *string `locationName:"resourceIdentifier" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssociateKmsKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssociateKmsKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateKmsKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateKmsKeyInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.ResourceIdentifier != nil && len(*s.ResourceIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AssociateKmsKeyInput) SetKmsKeyId(v string) *AssociateKmsKeyInput { + s.KmsKeyId = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *AssociateKmsKeyInput) SetLogGroupName(v string) *AssociateKmsKeyInput { + s.LogGroupName = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. 
+func (s *AssociateKmsKeyInput) SetResourceIdentifier(v string) *AssociateKmsKeyInput { + s.ResourceIdentifier = &v + return s +} + +type AssociateKmsKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssociateKmsKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssociateKmsKeyOutput) GoString() string { + return s.String() +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. + // + // TaskId is a required field + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. +func (s *CancelExportTaskInput) SetTaskId(v string) *CancelExportTaskInput { + s.TaskId = &v + return s +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +// This operation attempted to create a resource that already exists. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateDeliveryInput struct { + _ struct{} `type:"structure"` + + // The ARN of the delivery destination to use for this delivery. + // + // DeliveryDestinationArn is a required field + DeliveryDestinationArn *string `locationName:"deliveryDestinationArn" type:"string" required:"true"` + + // The name of the delivery source to use for this delivery. + // + // DeliverySourceName is a required field + DeliverySourceName *string `locationName:"deliverySourceName" min:"1" type:"string" required:"true"` + + // An optional list of key-value pairs to associate with the resource. 
+ // + // For more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDeliveryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDeliveryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDeliveryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDeliveryInput"} + if s.DeliveryDestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationArn")) + } + if s.DeliverySourceName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliverySourceName")) + } + if s.DeliverySourceName != nil && len(*s.DeliverySourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliverySourceName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryDestinationArn sets the DeliveryDestinationArn field's value. +func (s *CreateDeliveryInput) SetDeliveryDestinationArn(v string) *CreateDeliveryInput { + s.DeliveryDestinationArn = &v + return s +} + +// SetDeliverySourceName sets the DeliverySourceName field's value. 
+func (s *CreateDeliveryInput) SetDeliverySourceName(v string) *CreateDeliveryInput { + s.DeliverySourceName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDeliveryInput) SetTags(v map[string]*string) *CreateDeliveryInput { + s.Tags = v + return s +} + +type CreateDeliveryOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about the delivery that you just created. + Delivery *Delivery `locationName:"delivery" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDeliveryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDeliveryOutput) GoString() string { + return s.String() +} + +// SetDelivery sets the Delivery field's value. +func (s *CreateDeliveryOutput) SetDelivery(v *Delivery) *CreateDeliveryOutput { + s.Delivery = v + return s +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // The name of S3 bucket for the exported log data. The bucket must be in the + // same Amazon Web Services Region. + // + // Destination is a required field + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // The prefix used as the start of the key for every object exported. If you + // don't specify a value, the default is exportedlogs. 
+ DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // The start time of the range for the request, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this + // time are not exported. + // + // From is a required field + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Export only log streams that match the provided prefix. If you don't specify + // a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // The end time of the range for the request, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time + // are not exported. + // + // You must specify a time that is not earlier than when this log group was + // created. + // + // To is a required field + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Destination != nil && len(*s.Destination) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) + } + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.TaskName != nil && len(*s.TaskName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskName", 1)) + } + if s.To == nil { + invalidParams.Add(request.NewErrParamRequired("To")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *CreateExportTaskInput) SetDestination(v string) *CreateExportTaskInput { + s.Destination = &v + return s +} + +// SetDestinationPrefix sets the DestinationPrefix field's value. +func (s *CreateExportTaskInput) SetDestinationPrefix(v string) *CreateExportTaskInput { + s.DestinationPrefix = &v + return s +} + +// SetFrom sets the From field's value. +func (s *CreateExportTaskInput) SetFrom(v int64) *CreateExportTaskInput { + s.From = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateExportTaskInput) SetLogGroupName(v string) *CreateExportTaskInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. 
+func (s *CreateExportTaskInput) SetLogStreamNamePrefix(v string) *CreateExportTaskInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetTaskName sets the TaskName field's value. +func (s *CreateExportTaskInput) SetTaskName(v string) *CreateExportTaskInput { + s.TaskName = &v + return s +} + +// SetTo sets the To field's value. +func (s *CreateExportTaskInput) SetTo(v int64) *CreateExportTaskInput { + s.To = &v + return s +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +// SetTaskId sets the TaskId field's value. +func (s *CreateExportTaskOutput) SetTaskId(v string) *CreateExportTaskOutput { + s.TaskId = &v + return s +} + +type CreateLogAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The number of days to have visibility on an anomaly. After this time period + // has elapsed for an anomaly, it will be automatically baselined and the anomaly + // detector will treat new occurrences of a similar anomaly as normal. Therefore, + // if you do not correct the cause of an anomaly during the time period specified + // in anomalyVisibilityTime, it will be considered normal going forward and + // will not be detected as an anomaly. 
+	AnomalyVisibilityTime *int64 `locationName:"anomalyVisibilityTime" min:"7" type:"long"`
+
+	// A name for this anomaly detector.
+	DetectorName *string `locationName:"detectorName" min:"1" type:"string"`
+
+	// Specifies how often the anomaly detector is to run and look for anomalies.
+	// Set this value according to the frequency that the log group receives new
+	// logs. For example, if the log group receives new log events every 10 minutes,
+	// then 15 minutes might be a good setting for evaluationFrequency.
+	EvaluationFrequency *string `locationName:"evaluationFrequency" type:"string" enum:"EvaluationFrequency"`
+
+	// You can use this parameter to limit the anomaly detection model to examine
+	// only log events that match the pattern you specify here. For more information,
+	// see Filter and Pattern Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
+	FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+	// Optionally assigns a KMS key to secure this anomaly detector and its findings.
+	// If a key is assigned, the anomalies found and the model used by this detector
+	// are encrypted at rest with the key. If a key is assigned to an anomaly detector,
+	// a user must have permissions for both this key and for the anomaly detector
+	// to retrieve information about the anomalies that it finds.
+	//
+	// For more information about using a KMS key and to see the required IAM policy,
+	// see Use a KMS key with an anomaly detector (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/LogsAnomalyDetection-KMS.html).
+	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
+
+	// An array containing the ARN of the log group that this anomaly detector will
+	// watch. You can specify only one log group ARN.
+ // + // LogGroupArnList is a required field + LogGroupArnList []*string `locationName:"logGroupArnList" type:"list" required:"true"` + + // An optional list of key-value pairs to associate with the resource. + // + // For more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogAnomalyDetectorInput"} + if s.AnomalyVisibilityTime != nil && *s.AnomalyVisibilityTime < 7 { + invalidParams.Add(request.NewErrParamMinValue("AnomalyVisibilityTime", 7)) + } + if s.DetectorName != nil && len(*s.DetectorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorName", 1)) + } + if s.LogGroupArnList == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupArnList")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyVisibilityTime sets the AnomalyVisibilityTime field's value. 
+func (s *CreateLogAnomalyDetectorInput) SetAnomalyVisibilityTime(v int64) *CreateLogAnomalyDetectorInput { + s.AnomalyVisibilityTime = &v + return s +} + +// SetDetectorName sets the DetectorName field's value. +func (s *CreateLogAnomalyDetectorInput) SetDetectorName(v string) *CreateLogAnomalyDetectorInput { + s.DetectorName = &v + return s +} + +// SetEvaluationFrequency sets the EvaluationFrequency field's value. +func (s *CreateLogAnomalyDetectorInput) SetEvaluationFrequency(v string) *CreateLogAnomalyDetectorInput { + s.EvaluationFrequency = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *CreateLogAnomalyDetectorInput) SetFilterPattern(v string) *CreateLogAnomalyDetectorInput { + s.FilterPattern = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateLogAnomalyDetectorInput) SetKmsKeyId(v string) *CreateLogAnomalyDetectorInput { + s.KmsKeyId = &v + return s +} + +// SetLogGroupArnList sets the LogGroupArnList field's value. +func (s *CreateLogAnomalyDetectorInput) SetLogGroupArnList(v []*string) *CreateLogAnomalyDetectorInput { + s.LogGroupArnList = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLogAnomalyDetectorInput) SetTags(v map[string]*string) *CreateLogAnomalyDetectorInput { + s.Tags = v + return s +} + +type CreateLogAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the log anomaly detector that you just created. + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogAnomalyDetectorOutput) GoString() string { + return s.String() +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *CreateLogAnomalyDetectorOutput) SetAnomalyDetectorArn(v string) *CreateLogAnomalyDetectorOutput { + s.AnomalyDetectorArn = &v + return s +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the KMS key to use when encrypting log + // data. For more information, see Amazon Resource Names (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms). + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // Use this parameter to specify the log group class for this log group. There + // are two classes: + // + // * The Standard log class supports all CloudWatch Logs features. + // + // * The Infrequent Access log class supports a subset of CloudWatch Logs + // features and incurs lower costs. + // + // If you omit this parameter, the default of STANDARD is used. + // + // The value of logGroupClass can't be changed after a log group is created. + // + // For details about the features supported by each class, see Log classes (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html) + LogGroupClass *string `locationName:"logGroupClass" type:"string" enum:"LogGroupClass"` + + // A name for the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The key-value pairs to use for the tags. + // + // You can grant users access to certain log groups while preventing them from + // accessing other log groups. 
To do so, tag your groups and use IAM policies + // that refer to those tags. To assign tags when you create a log group, you + // must have either the logs:TagResource or logs:TagLogGroup permission. For + // more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For more + // information about using tags to control access, see Controlling access to + // Amazon Web Services resources using tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html). + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
+func (s *CreateLogGroupInput) SetKmsKeyId(v string) *CreateLogGroupInput { + s.KmsKeyId = &v + return s +} + +// SetLogGroupClass sets the LogGroupClass field's value. +func (s *CreateLogGroupInput) SetLogGroupClass(v string) *CreateLogGroupInput { + s.LogGroupClass = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateLogGroupInput) SetLogGroupName(v string) *CreateLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLogGroupInput) SetTags(v map[string]*string) *CreateLogGroupInput { + s.Tags = v + return s +} + +type CreateLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogGroupOutput) GoString() string { + return s.String() +} + +type CreateLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateLogStreamInput) SetLogGroupName(v string) *CreateLogStreamInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *CreateLogStreamInput) SetLogStreamName(v string) *CreateLogStreamInput { + s.LogStreamName = &v + return s +} + +type CreateLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CreateLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLogStreamOutput) GoString() string { + return s.String() +} + +// The event was already logged. +// +// PutLogEvents actions are now always accepted and never return DataAlreadyAcceptedException +// regardless of whether a given batch of log events has already been accepted. +type DataAlreadyAcceptedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataAlreadyAcceptedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataAlreadyAcceptedException) GoString() string { + return s.String() +} + +func newErrorDataAlreadyAcceptedException(v protocol.ResponseMetadata) error { + return &DataAlreadyAcceptedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DataAlreadyAcceptedException) Code() string { + return "DataAlreadyAcceptedException" +} + +// Message returns the exception's message. 
+func (s *DataAlreadyAcceptedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DataAlreadyAcceptedException) OrigErr() error { + return nil +} + +func (s *DataAlreadyAcceptedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DataAlreadyAcceptedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DataAlreadyAcceptedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type DeleteAccountPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to delete. + // + // PolicyName is a required field + PolicyName *string `locationName:"policyName" type:"string" required:"true"` + + // The type of policy to delete. + // + // PolicyType is a required field + PolicyType *string `locationName:"policyType" type:"string" required:"true" enum:"PolicyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAccountPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAccountPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAccountPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccountPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyType == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteAccountPolicyInput) SetPolicyName(v string) *DeleteAccountPolicyInput { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *DeleteAccountPolicyInput) SetPolicyType(v string) *DeleteAccountPolicyInput { + s.PolicyType = &v + return s +} + +type DeleteAccountPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAccountPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAccountPolicyOutput) GoString() string { + return s.String() +} + +type DeleteDataProtectionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name or ARN of the log group that you want to delete the data protection + // policy for. + // + // LogGroupIdentifier is a required field + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataProtectionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataProtectionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataProtectionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataProtectionPolicyInput"} + if s.LogGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupIdentifier")) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *DeleteDataProtectionPolicyInput) SetLogGroupIdentifier(v string) *DeleteDataProtectionPolicyInput { + s.LogGroupIdentifier = &v + return s +} + +type DeleteDataProtectionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataProtectionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteDataProtectionPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteDeliveryDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the delivery destination that you want to delete. You can find
+	// a list of delivery destination names by using the DescribeDeliveryDestinations
+	// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveryDestinations.html)
+	// operation.
+	//
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteDeliveryDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteDeliveryDestinationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDeliveryDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryDestinationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteDeliveryDestinationInput) SetName(v string) *DeleteDeliveryDestinationInput { + s.Name = &v + return s +} + +type DeleteDeliveryDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryDestinationOutput) GoString() string { + return s.String() +} + +type DeleteDeliveryDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery destination that you want to delete the policy for. + // + // DeliveryDestinationName is a required field + DeliveryDestinationName *string `locationName:"deliveryDestinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteDeliveryDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDeliveryDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryDestinationPolicyInput"} + if s.DeliveryDestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationName")) + } + if s.DeliveryDestinationName != nil && len(*s.DeliveryDestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryDestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryDestinationName sets the DeliveryDestinationName field's value. +func (s *DeleteDeliveryDestinationPolicyInput) SetDeliveryDestinationName(v string) *DeleteDeliveryDestinationPolicyInput { + s.DeliveryDestinationName = &v + return s +} + +type DeleteDeliveryDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteDeliveryDestinationPolicyOutput) GoString() string { + return s.String() +} + +type DeleteDeliveryInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the delivery to delete. You can find the ID of a delivery + // with the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) + // operation. + // + // Id is a required field + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDeliveryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *DeleteDeliveryInput) SetId(v string) *DeleteDeliveryInput { + s.Id = &v + return s +} + +type DeleteDeliveryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliveryOutput) GoString() string { + return s.String() +} + +type DeleteDeliverySourceInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery source that you want to delete. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliverySourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliverySourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDeliverySourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliverySourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. 
+func (s *DeleteDeliverySourceInput) SetName(v string) *DeleteDeliverySourceInput { + s.Name = &v + return s +} + +type DeleteDeliverySourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliverySourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDeliverySourceOutput) GoString() string { + return s.String() +} + +type DeleteDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationName sets the DestinationName field's value. +func (s *DeleteDestinationInput) SetDestinationName(v string) *DeleteDestinationInput { + s.DestinationName = &v + return s +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The ARN of the anomaly detector to delete. You can find the ARNs of log anomaly + // detectors in your account by using the ListLogAnomalyDetectors (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListLogAnomalyDetectors.html) + // operation. + // + // AnomalyDetectorArn is a required field + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogAnomalyDetectorInput"} + if s.AnomalyDetectorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AnomalyDetectorArn")) + } + if s.AnomalyDetectorArn != nil && len(*s.AnomalyDetectorArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyDetectorArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *DeleteLogAnomalyDetectorInput) SetAnomalyDetectorArn(v string) *DeleteLogAnomalyDetectorInput { + s.AnomalyDetectorArn = &v + return s +} + +type DeleteLogAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogAnomalyDetectorOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteLogGroupInput) SetLogGroupName(v string) *DeleteLogGroupInput { + s.LogGroupName = &v + return s +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteLogStreamInput) SetLogGroupName(v string) *DeleteLogStreamInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *DeleteLogStreamInput) SetLogStreamName(v string) *DeleteLogStreamInput { + s.LogStreamName = &v + return s +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter. 
+ // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. +func (s *DeleteMetricFilterInput) SetFilterName(v string) *DeleteMetricFilterInput { + s.FilterName = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *DeleteMetricFilterInput) SetLogGroupName(v string) *DeleteMetricFilterInput { + s.LogGroupName = &v + return s +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition that you want to delete. You can use DescribeQueryDefinitions + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html) + // to retrieve the IDs of your saved query definitions. + // + // QueryDefinitionId is a required field + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteQueryDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteQueryDefinitionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteQueryDefinitionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteQueryDefinitionInput"}
+	if s.QueryDefinitionId == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueryDefinitionId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetQueryDefinitionId sets the QueryDefinitionId field's value.
+func (s *DeleteQueryDefinitionInput) SetQueryDefinitionId(v string) *DeleteQueryDefinitionInput {
+	s.QueryDefinitionId = &v
+	return s
+}
+
+type DeleteQueryDefinitionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A value of TRUE indicates that the operation succeeded. FALSE indicates that
+	// the operation failed.
+	Success *bool `locationName:"success" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteQueryDefinitionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteQueryDefinitionOutput) GoString() string {
+	return s.String()
+}
+
+// SetSuccess sets the Success field's value.
+func (s *DeleteQueryDefinitionOutput) SetSuccess(v bool) *DeleteQueryDefinitionOutput {
+	s.Success = &v
+	return s
+}
+
+type DeleteResourcePolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the policy to be revoked. This parameter is required. NOTE(review): the field tag lacks required:"true" and the type has no Validate method — confirm against the API model.
+ PolicyName *string `locationName:"policyName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteResourcePolicyInput) SetPolicyName(v string) *DeleteResourcePolicyInput { + s.PolicyName = &v + return s +} + +type DeleteResourcePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteRetentionPolicyInput) SetLogGroupName(v string) *DeleteRetentionPolicyInput { + s.LogGroupName = &v + return s +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. +func (s *DeleteSubscriptionFilterInput) SetFilterName(v string) *DeleteSubscriptionFilterInput { + s.FilterName = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteSubscriptionFilterInput) SetLogGroupName(v string) *DeleteSubscriptionFilterInput { + s.LogGroupName = &v + return s +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +// This structure contains information about one delivery in your account. 
+// +// A delivery is a connection between a logical delivery source and a logical +// delivery destination. +// +// For more information, see CreateDelivery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// You can't update an existing delivery. You can only create and delete deliveries. +type Delivery struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies this delivery. + Arn *string `locationName:"arn" type:"string"` + + // The ARN of the delivery destination that is associated with this delivery. + DeliveryDestinationArn *string `locationName:"deliveryDestinationArn" type:"string"` + + // Displays whether the delivery destination associated with this delivery is + // CloudWatch Logs, Amazon S3, or Firehose. + DeliveryDestinationType *string `locationName:"deliveryDestinationType" type:"string" enum:"DeliveryDestinationType"` + + // The name of the delivery source that is associated with this delivery. + DeliverySourceName *string `locationName:"deliverySourceName" min:"1" type:"string"` + + // The unique ID that identifies this delivery in your account. + Id *string `locationName:"id" min:"1" type:"string"` + + // The tags that have been assigned to this delivery. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delivery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s Delivery) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *Delivery) SetArn(v string) *Delivery {
+	s.Arn = &v
+	return s
+}
+
+// SetDeliveryDestinationArn sets the DeliveryDestinationArn field's value.
+func (s *Delivery) SetDeliveryDestinationArn(v string) *Delivery {
+	s.DeliveryDestinationArn = &v
+	return s
+}
+
+// SetDeliveryDestinationType sets the DeliveryDestinationType field's value.
+func (s *Delivery) SetDeliveryDestinationType(v string) *Delivery {
+	s.DeliveryDestinationType = &v
+	return s
+}
+
+// SetDeliverySourceName sets the DeliverySourceName field's value.
+func (s *Delivery) SetDeliverySourceName(v string) *Delivery {
+	s.DeliverySourceName = &v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *Delivery) SetId(v string) *Delivery {
+	s.Id = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Delivery) SetTags(v map[string]*string) *Delivery {
+	s.Tags = v
+	return s
+}
+
+// This structure contains information about one delivery destination in your
+// account. A delivery destination is an Amazon Web Services resource that represents
+// an Amazon Web Services service that logs can be sent to. CloudWatch Logs,
+// Amazon S3, and Firehose are supported as delivery destinations.
+//
+// To configure logs delivery between a supported Amazon Web Services service
+// and a destination, you must do the following:
+//
+//   - Create a delivery source, which is a logical object that represents
+//     the resource that is actually sending the logs. For more information,
+//     see PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
+//
+//   - Create a delivery destination, which is a logical object that represents
+//     the actual delivery destination.
+// +// - If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) +// in the destination account to assign an IAM policy to the destination. +// This policy allows delivery to that destination. +// +// - Create a delivery by pairing exactly one delivery source and one delivery +// destination. For more information, see CreateDelivery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// You can configure a single delivery source to send logs to multiple destinations +// by creating multiple deliveries. You can also create multiple deliveries +// to configure multiple delivery sources to send logs to the same delivery +// destination. +type DeliveryDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies this delivery destination. + Arn *string `locationName:"arn" type:"string"` + + // A structure that contains the ARN of the Amazon Web Services resource that + // will receive the logs. + DeliveryDestinationConfiguration *DeliveryDestinationConfiguration `locationName:"deliveryDestinationConfiguration" type:"structure"` + + // Displays whether this delivery destination is CloudWatch Logs, Amazon S3, + // or Firehose. + DeliveryDestinationType *string `locationName:"deliveryDestinationType" type:"string" enum:"DeliveryDestinationType"` + + // The name of this delivery destination. + Name *string `locationName:"name" min:"1" type:"string"` + + // The format of the logs that are sent to this delivery destination. + OutputFormat *string `locationName:"outputFormat" type:"string" enum:"OutputFormat"` + + // The tags that have been assigned to this delivery destination. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliveryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliveryDestination) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DeliveryDestination) SetArn(v string) *DeliveryDestination { + s.Arn = &v + return s +} + +// SetDeliveryDestinationConfiguration sets the DeliveryDestinationConfiguration field's value. +func (s *DeliveryDestination) SetDeliveryDestinationConfiguration(v *DeliveryDestinationConfiguration) *DeliveryDestination { + s.DeliveryDestinationConfiguration = v + return s +} + +// SetDeliveryDestinationType sets the DeliveryDestinationType field's value. +func (s *DeliveryDestination) SetDeliveryDestinationType(v string) *DeliveryDestination { + s.DeliveryDestinationType = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeliveryDestination) SetName(v string) *DeliveryDestination { + s.Name = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *DeliveryDestination) SetOutputFormat(v string) *DeliveryDestination { + s.OutputFormat = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DeliveryDestination) SetTags(v map[string]*string) *DeliveryDestination { + s.Tags = v + return s +} + +// A structure that contains information about one logs delivery destination. +type DeliveryDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon Web Services destination that this delivery destination + // represents. 
That Amazon Web Services destination can be a log group in CloudWatch + // Logs, an Amazon S3 bucket, or a delivery stream in Firehose. + // + // DestinationResourceArn is a required field + DestinationResourceArn *string `locationName:"destinationResourceArn" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliveryDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliveryDestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeliveryDestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeliveryDestinationConfiguration"} + if s.DestinationResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationResourceArn sets the DestinationResourceArn field's value. +func (s *DeliveryDestinationConfiguration) SetDestinationResourceArn(v string) *DeliveryDestinationConfiguration { + s.DestinationResourceArn = &v + return s +} + +// This structure contains information about one delivery source in your account. +// A delivery source is an Amazon Web Services resource that sends logs to an +// Amazon Web Services destination. The destination can be CloudWatch Logs, +// Amazon S3, or Firehose. 
+// +// Only some Amazon Web Services services support being configured as a delivery +// source. These services are listed as Supported [V2 Permissions] in the table +// at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// +// To configure logs delivery between a supported Amazon Web Services service +// and a destination, you must do the following: +// +// - Create a delivery source, which is a logical object that represents +// the resource that is actually sending the logs. For more information, +// see PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html). +// +// - Create a delivery destination, which is a logical object that represents +// the actual delivery destination. For more information, see PutDeliveryDestination +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html). +// +// - If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) +// in the destination account to assign an IAM policy to the destination. +// This policy allows delivery to that destination. +// +// - Create a delivery by pairing exactly one delivery source and one delivery +// destination. For more information, see CreateDelivery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html). +// +// You can configure a single delivery source to send logs to multiple destinations +// by creating multiple deliveries. You can also create multiple deliveries +// to configure multiple delivery sources to send logs to the same delivery +// destination. +type DeliverySource struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies this delivery source. 
+ Arn *string `locationName:"arn" type:"string"` + + // The type of log that the source is sending. For valid values for this parameter, + // see the documentation for the source service. + LogType *string `locationName:"logType" min:"1" type:"string"` + + // The unique name of the delivery source. + Name *string `locationName:"name" min:"1" type:"string"` + + // This array contains the ARN of the Amazon Web Services resource that sends + // logs and is represented by this delivery source. Currently, only one ARN + // can be in the array. + ResourceArns []*string `locationName:"resourceArns" type:"list"` + + // The Amazon Web Services service that is sending logs. + Service *string `locationName:"service" min:"1" type:"string"` + + // The tags that have been assigned to this delivery source. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliverySource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeliverySource) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DeliverySource) SetArn(v string) *DeliverySource { + s.Arn = &v + return s +} + +// SetLogType sets the LogType field's value. +func (s *DeliverySource) SetLogType(v string) *DeliverySource { + s.LogType = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeliverySource) SetName(v string) *DeliverySource { + s.Name = &v + return s +} + +// SetResourceArns sets the ResourceArns field's value. 
+func (s *DeliverySource) SetResourceArns(v []*string) *DeliverySource { + s.ResourceArns = v + return s +} + +// SetService sets the Service field's value. +func (s *DeliverySource) SetService(v string) *DeliverySource { + s.Service = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DeliverySource) SetTags(v map[string]*string) *DeliverySource { + s.Tags = v + return s +} + +type DescribeAccountPoliciesInput struct { + _ struct{} `type:"structure"` + + // If you are using an account that is set up as a monitoring account for CloudWatch + // unified cross-account observability, you can use this to specify the account + // ID of a source account. If you do, the operation returns the account policy + // for the specified account. Currently, you can specify only one account ID + // in this parameter. + // + // If you omit this parameter, only the policy in the current account is returned. + AccountIdentifiers []*string `locationName:"accountIdentifiers" type:"list"` + + // Use this parameter to limit the returned policies to only the policy with + // the name that you specify. + PolicyName *string `locationName:"policyName" type:"string"` + + // Use this parameter to limit the returned policies to only the policies that + // match the policy type that you specify. + // + // PolicyType is a required field + PolicyType *string `locationName:"policyType" type:"string" required:"true" enum:"PolicyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAccountPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAccountPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAccountPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAccountPoliciesInput"} + if s.PolicyType == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIdentifiers sets the AccountIdentifiers field's value. +func (s *DescribeAccountPoliciesInput) SetAccountIdentifiers(v []*string) *DescribeAccountPoliciesInput { + s.AccountIdentifiers = v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DescribeAccountPoliciesInput) SetPolicyName(v string) *DescribeAccountPoliciesInput { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *DescribeAccountPoliciesInput) SetPolicyType(v string) *DescribeAccountPoliciesInput { + s.PolicyType = &v + return s +} + +type DescribeAccountPoliciesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the CloudWatch Logs + // account policies that match the specified filters. + AccountPolicies []*AccountPolicy `locationName:"accountPolicies" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAccountPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAccountPoliciesOutput) GoString() string { + return s.String() +} + +// SetAccountPolicies sets the AccountPolicies field's value. +func (s *DescribeAccountPoliciesOutput) SetAccountPolicies(v []*AccountPolicy) *DescribeAccountPoliciesOutput { + s.AccountPolicies = v + return s +} + +type DescribeDeliveriesInput struct { + _ struct{} `type:"structure"` + + // Optionally specify the maximum number of deliveries to return in the response. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDeliveriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDeliveriesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. 
+func (s *DescribeDeliveriesInput) SetLimit(v int64) *DescribeDeliveriesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliveriesInput) SetNextToken(v string) *DescribeDeliveriesInput { + s.NextToken = &v + return s +} + +type DescribeDeliveriesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures. Each structure contains information about one delivery + // in the account. + Deliveries []*Delivery `locationName:"deliveries" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveriesOutput) GoString() string { + return s.String() +} + +// SetDeliveries sets the Deliveries field's value. +func (s *DescribeDeliveriesOutput) SetDeliveries(v []*Delivery) *DescribeDeliveriesOutput { + s.Deliveries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliveriesOutput) SetNextToken(v string) *DescribeDeliveriesOutput { + s.NextToken = &v + return s +} + +type DescribeDeliveryDestinationsInput struct { + _ struct{} `type:"structure"` + + // Optionally specify the maximum number of delivery destinations to return + // in the response. 
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveryDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveryDestinationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDeliveryDestinationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDeliveryDestinationsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeDeliveryDestinationsInput) SetLimit(v int64) *DescribeDeliveryDestinationsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliveryDestinationsInput) SetNextToken(v string) *DescribeDeliveryDestinationsInput { + s.NextToken = &v + return s +} + +type DescribeDeliveryDestinationsOutput struct { + _ struct{} `type:"structure"` + + // An array of structures. 
Each structure contains information about one delivery + // destination in the account. + DeliveryDestinations []*DeliveryDestination `locationName:"deliveryDestinations" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveryDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliveryDestinationsOutput) GoString() string { + return s.String() +} + +// SetDeliveryDestinations sets the DeliveryDestinations field's value. +func (s *DescribeDeliveryDestinationsOutput) SetDeliveryDestinations(v []*DeliveryDestination) *DescribeDeliveryDestinationsOutput { + s.DeliveryDestinations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliveryDestinationsOutput) SetNextToken(v string) *DescribeDeliveryDestinationsOutput { + s.NextToken = &v + return s +} + +type DescribeDeliverySourcesInput struct { + _ struct{} `type:"structure"` + + // Optionally specify the maximum number of delivery sources to return in the + // response. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliverySourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliverySourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDeliverySourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDeliverySourcesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeDeliverySourcesInput) SetLimit(v int64) *DescribeDeliverySourcesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliverySourcesInput) SetNextToken(v string) *DescribeDeliverySourcesInput { + s.NextToken = &v + return s +} + +type DescribeDeliverySourcesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures. Each structure contains information about one delivery + // source in the account. + DeliverySources []*DeliverySource `locationName:"deliverySources" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliverySourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDeliverySourcesOutput) GoString() string { + return s.String() +} + +// SetDeliverySources sets the DeliverySources field's value. +func (s *DescribeDeliverySourcesOutput) SetDeliverySources(v []*DeliverySource) *DescribeDeliverySourcesOutput { + s.DeliverySources = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDeliverySourcesOutput) SetNextToken(v string) *DescribeDeliverySourcesOutput { + s.NextToken = &v + return s +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. If you don't specify a value, no prefix filter is applied. + DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // maximum value of 50 items is used. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDestinationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDestinationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"} + if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationNamePrefix sets the DestinationNamePrefix field's value. +func (s *DescribeDestinationsInput) SetDestinationNamePrefix(v string) *DescribeDestinationsInput { + s.DestinationNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeDestinationsInput) SetLimit(v int64) *DescribeDestinationsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDestinationsInput) SetNextToken(v string) *DescribeDestinationsInput { + s.NextToken = &v + return s +} + +type DescribeDestinationsOutput struct { + _ struct{} `type:"structure"` + + // The destinations. + Destinations []*Destination `locationName:"destinations" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeDestinationsOutput) GoString() string { + return s.String() +} + +// SetDestinations sets the Destinations field's value. +func (s *DescribeDestinationsOutput) SetDestinations(v []*Destination) *DescribeDestinationsOutput { + s.Destinations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDestinationsOutput) SetNextToken(v string) *DescribeDestinationsOutput { + s.NextToken = &v + return s +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The status code of the export task. Specifying a status code filters the + // results to zero or more export tasks. + StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` + + // The ID of the export task. Specifying a task ID filters the results to one + // or zero export tasks. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeExportTasksInput) SetLimit(v int64) *DescribeExportTasksInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportTasksInput) SetNextToken(v string) *DescribeExportTasksInput { + s.NextToken = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *DescribeExportTasksInput) SetStatusCode(v string) *DescribeExportTasksInput { + s.StatusCode = &v + return s +} + +// SetTaskId sets the TaskId field's value. 
+func (s *DescribeExportTasksInput) SetTaskId(v string) *DescribeExportTasksInput { + s.TaskId = &v + return s +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // The export tasks. + ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +// SetExportTasks sets the ExportTasks field's value. +func (s *DescribeExportTasksOutput) SetExportTasks(v []*ExportTask) *DescribeExportTasksOutput { + s.ExportTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportTasksOutput) SetNextToken(v string) *DescribeExportTasksOutput { + s.NextToken = &v + return s +} + +type DescribeLogGroupsInput struct { + _ struct{} `type:"structure"` + + // When includeLinkedAccounts is set to True, use this parameter to specify + // the list of accounts to search. You can specify as many as 20 account IDs + // in the array. + AccountIdentifiers []*string `locationName:"accountIdentifiers" type:"list"` + + // If you are using a monitoring account, set this to True to have the operation + // return log groups in the accounts listed in accountIdentifiers. 
+ // + // If this parameter is set to true and accountIdentifiers contains a null value, + // the operation returns all log groups in the monitoring account and all log + // groups in all source accounts that are linked to the monitoring account. + IncludeLinkedAccounts *bool `locationName:"includeLinkedAccounts" type:"boolean"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // Specifies the log group class for this log group. There are two classes: + // + // * The Standard log class supports all CloudWatch Logs features. + // + // * The Infrequent Access log class supports a subset of CloudWatch Logs + // features and incurs lower costs. + // + // For details about the features supported by each class, see Log classes (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html) + LogGroupClass *string `locationName:"logGroupClass" type:"string" enum:"LogGroupClass"` + + // If you specify a string for this parameter, the operation returns only log + // groups that have names that match the string based on a case-sensitive substring + // search. For example, if you specify Foo, log groups named FooBar, aws/Foo, + // and GroupFoo would match, but foo, F/o/o and Froo would not match. + // + // If you specify logGroupNamePattern in your request, then only arn, creationTime, + // and logGroupName are included in the response. + // + // logGroupNamePattern and logGroupNamePrefix are mutually exclusive. Only one + // of these parameters can be passed. + LogGroupNamePattern *string `locationName:"logGroupNamePattern" type:"string"` + + // The prefix to match. + // + // logGroupNamePrefix and logGroupNamePattern are mutually exclusive. Only one + // of these parameters can be passed. 
+ LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLogGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIdentifiers sets the AccountIdentifiers field's value. +func (s *DescribeLogGroupsInput) SetAccountIdentifiers(v []*string) *DescribeLogGroupsInput { + s.AccountIdentifiers = v + return s +} + +// SetIncludeLinkedAccounts sets the IncludeLinkedAccounts field's value. 
+func (s *DescribeLogGroupsInput) SetIncludeLinkedAccounts(v bool) *DescribeLogGroupsInput { + s.IncludeLinkedAccounts = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeLogGroupsInput) SetLimit(v int64) *DescribeLogGroupsInput { + s.Limit = &v + return s +} + +// SetLogGroupClass sets the LogGroupClass field's value. +func (s *DescribeLogGroupsInput) SetLogGroupClass(v string) *DescribeLogGroupsInput { + s.LogGroupClass = &v + return s +} + +// SetLogGroupNamePattern sets the LogGroupNamePattern field's value. +func (s *DescribeLogGroupsInput) SetLogGroupNamePattern(v string) *DescribeLogGroupsInput { + s.LogGroupNamePattern = &v + return s +} + +// SetLogGroupNamePrefix sets the LogGroupNamePrefix field's value. +func (s *DescribeLogGroupsInput) SetLogGroupNamePrefix(v string) *DescribeLogGroupsInput { + s.LogGroupNamePrefix = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogGroupsInput) SetNextToken(v string) *DescribeLogGroupsInput { + s.NextToken = &v + return s +} + +type DescribeLogGroupsOutput struct { + _ struct{} `type:"structure"` + + // The log groups. + // + // If the retentionInDays value is not included for a log group, then that log + // group's events do not expire. + LogGroups []*LogGroup `locationName:"logGroups" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeLogGroupsOutput) GoString() string {
+	return s.String()
+}
+
+// SetLogGroups sets the LogGroups field's value.
+func (s *DescribeLogGroupsOutput) SetLogGroups(v []*LogGroup) *DescribeLogGroupsOutput {
+	s.LogGroups = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeLogGroupsOutput) SetNextToken(v string) *DescribeLogGroupsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeLogStreamsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If the value is true, results are returned in descending order. If the value
+	// is false, results are returned in ascending order. The default value is
+	// false.
+	Descending *bool `locationName:"descending" type:"boolean"`
+
+	// The maximum number of items returned. If you don't specify a value, the default
+	// is up to 50 items.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// Specify either the name or ARN of the log group to view. If the log group
+	// is in a source account and you are using a monitoring account, you must use
+	// the log group ARN.
+	//
+	// You must include either logGroupIdentifier or logGroupName, but not both.
+	LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"`
+
+	// The name of the log group.
+	//
+	// You must include either logGroupIdentifier or logGroupName, but not both.
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+	// The prefix to match.
+	//
+	// If orderBy is LastEventTime, you cannot specify this parameter.
+	LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If the value is LogStreamName, the results are ordered by log stream name. + // If the value is LastEventTime, the results are ordered by the event time. + // The default value is LogStreamName. + // + // If you order the results by event time, you cannot specify the logStreamNamePrefix + // parameter. + // + // lastEventTimestamp represents the time of the most recent log event in the + // log stream in CloudWatch Logs. This number is expressed as the number of + // milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimestamp updates on + // an eventual consistency basis. It typically updates in less than an hour + // from ingestion, but in rare situations might take longer. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeLogStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescending sets the Descending field's value. +func (s *DescribeLogStreamsInput) SetDescending(v bool) *DescribeLogStreamsInput { + s.Descending = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeLogStreamsInput) SetLimit(v int64) *DescribeLogStreamsInput { + s.Limit = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *DescribeLogStreamsInput) SetLogGroupIdentifier(v string) *DescribeLogStreamsInput { + s.LogGroupIdentifier = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeLogStreamsInput) SetLogGroupName(v string) *DescribeLogStreamsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. +func (s *DescribeLogStreamsInput) SetLogStreamNamePrefix(v string) *DescribeLogStreamsInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogStreamsInput) SetNextToken(v string) *DescribeLogStreamsInput { + s.NextToken = &v + return s +} + +// SetOrderBy sets the OrderBy field's value. 
+func (s *DescribeLogStreamsInput) SetOrderBy(v string) *DescribeLogStreamsInput { + s.OrderBy = &v + return s +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // The log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +// SetLogStreams sets the LogStreams field's value. +func (s *DescribeLogStreamsOutput) SetLogStreams(v []*LogStream) *DescribeLogStreamsOutput { + s.LogStreams = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogStreamsOutput) SetNextToken(v string) *DescribeLogStreamsOutput { + s.NextToken = &v + return s +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. CloudWatch Logs uses the value that you set here only + // if you also include the logGroupName parameter in your request. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Filters results to include only those with the specified metric name. If + // you include this parameter in your request, you must also include the metricNamespace + // parameter. + MetricName *string `locationName:"metricName" type:"string"` + + // Filters results to include only those in the specified namespace. If you + // include this parameter in your request, you must also include the metricName + // parameter. + MetricNamespace *string `locationName:"metricNamespace" type:"string"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMetricFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMetricFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMetricFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterNamePrefix sets the FilterNamePrefix field's value. +func (s *DescribeMetricFiltersInput) SetFilterNamePrefix(v string) *DescribeMetricFiltersInput { + s.FilterNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeMetricFiltersInput) SetLimit(v int64) *DescribeMetricFiltersInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeMetricFiltersInput) SetLogGroupName(v string) *DescribeMetricFiltersInput { + s.LogGroupName = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *DescribeMetricFiltersInput) SetMetricName(v string) *DescribeMetricFiltersInput { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *DescribeMetricFiltersInput) SetMetricNamespace(v string) *DescribeMetricFiltersInput { + s.MetricNamespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMetricFiltersInput) SetNextToken(v string) *DescribeMetricFiltersInput { + s.NextToken = &v + return s +} + +type DescribeMetricFiltersOutput struct { + _ struct{} `type:"structure"` + + // The metric filters. 
+ MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMetricFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMetricFiltersOutput) GoString() string { + return s.String() +} + +// SetMetricFilters sets the MetricFilters field's value. +func (s *DescribeMetricFiltersOutput) SetMetricFilters(v []*MetricFilter) *DescribeMetricFiltersOutput { + s.MetricFilters = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMetricFiltersOutput) SetNextToken(v string) *DescribeMetricFiltersOutput { + s.NextToken = &v + return s +} + +type DescribeQueriesInput struct { + _ struct{} `type:"structure"` + + // Limits the returned queries to only those for the specified log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Limits the number of returned queries to the specified number. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Limits the returned queries to only those that have the specified status. + // Valid values are Cancelled, Complete, Failed, Running, and Scheduled. 
+ Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeQueriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeQueriesInput"} + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeQueriesInput) SetLogGroupName(v string) *DescribeQueriesInput { + s.LogGroupName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeQueriesInput) SetMaxResults(v int64) *DescribeQueriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueriesInput) SetNextToken(v string) *DescribeQueriesInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *DescribeQueriesInput) SetStatus(v string) *DescribeQueriesInput { + s.Status = &v + return s +} + +type DescribeQueriesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of queries that match the request. + Queries []*QueryInfo `locationName:"queries" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueriesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueriesOutput) SetNextToken(v string) *DescribeQueriesOutput { + s.NextToken = &v + return s +} + +// SetQueries sets the Queries field's value. +func (s *DescribeQueriesOutput) SetQueries(v []*QueryInfo) *DescribeQueriesOutput { + s.Queries = v + return s +} + +type DescribeQueryDefinitionsInput struct { + _ struct{} `type:"structure"` + + // Limits the number of returned query definitions to the specified number. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Use this parameter to filter your results to only the query definitions that + // have names that start with the prefix you specify. 
+ QueryDefinitionNamePrefix *string `locationName:"queryDefinitionNamePrefix" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueryDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueryDefinitionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeQueryDefinitionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeQueryDefinitionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.QueryDefinitionNamePrefix != nil && len(*s.QueryDefinitionNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryDefinitionNamePrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeQueryDefinitionsInput) SetMaxResults(v int64) *DescribeQueryDefinitionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsInput) SetNextToken(v string) *DescribeQueryDefinitionsInput { + s.NextToken = &v + return s +} + +// SetQueryDefinitionNamePrefix sets the QueryDefinitionNamePrefix field's value. 
+func (s *DescribeQueryDefinitionsInput) SetQueryDefinitionNamePrefix(v string) *DescribeQueryDefinitionsInput { + s.QueryDefinitionNamePrefix = &v + return s +} + +type DescribeQueryDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of query definitions that match your request. + QueryDefinitions []*QueryDefinition `locationName:"queryDefinitions" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueryDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeQueryDefinitionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsOutput) SetNextToken(v string) *DescribeQueryDefinitionsOutput { + s.NextToken = &v + return s +} + +// SetQueryDefinitions sets the QueryDefinitions field's value. +func (s *DescribeQueryDefinitionsOutput) SetQueryDefinitions(v []*QueryDefinition) *DescribeQueryDefinitionsOutput { + s.QueryDefinitions = v + return s +} + +type DescribeResourcePoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of resource policies to be displayed with one call of + // this API. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeResourcePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeResourcePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeResourcePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeResourcePoliciesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeResourcePoliciesInput) SetLimit(v int64) *DescribeResourcePoliciesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeResourcePoliciesInput) SetNextToken(v string) *DescribeResourcePoliciesInput { + s.NextToken = &v + return s +} + +type DescribeResourcePoliciesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The resource policies that exist in this account. 
+ ResourcePolicies []*ResourcePolicy `locationName:"resourcePolicies" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeResourcePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeResourcePoliciesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeResourcePoliciesOutput) SetNextToken(v string) *DescribeResourcePoliciesOutput { + s.NextToken = &v + return s +} + +// SetResourcePolicies sets the ResourcePolicies field's value. +func (s *DescribeResourcePoliciesOutput) SetResourcePolicies(v []*ResourcePolicy) *DescribeResourcePoliciesOutput { + s.ResourcePolicies = v + return s +} + +type DescribeSubscriptionFiltersInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSubscriptionFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSubscriptionFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSubscriptionFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterNamePrefix sets the FilterNamePrefix field's value. +func (s *DescribeSubscriptionFiltersInput) SetFilterNamePrefix(v string) *DescribeSubscriptionFiltersInput { + s.FilterNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeSubscriptionFiltersInput) SetLimit(v int64) *DescribeSubscriptionFiltersInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *DescribeSubscriptionFiltersInput) SetLogGroupName(v string) *DescribeSubscriptionFiltersInput { + s.LogGroupName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscriptionFiltersInput) SetNextToken(v string) *DescribeSubscriptionFiltersInput { + s.NextToken = &v + return s +} + +type DescribeSubscriptionFiltersOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The subscription filters. + SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSubscriptionFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSubscriptionFiltersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscriptionFiltersOutput) SetNextToken(v string) *DescribeSubscriptionFiltersOutput { + s.NextToken = &v + return s +} + +// SetSubscriptionFilters sets the SubscriptionFilters field's value. +func (s *DescribeSubscriptionFiltersOutput) SetSubscriptionFilters(v []*SubscriptionFilter) *DescribeSubscriptionFiltersOutput { + s.SubscriptionFilters = v + return s +} + +// Represents a cross-account destination that receives subscription log events. 
+type Destination struct { + _ struct{} `type:"structure"` + + // An IAM policy document that governs which Amazon Web Services accounts can + // create subscription filters against this destination. + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` + + // The ARN of this destination. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the destination, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The name of the destination. + DestinationName *string `locationName:"destinationName" min:"1" type:"string"` + + // A role for impersonation, used when delivering log events to the target. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the physical target where the log events + // are delivered (for example, a Kinesis stream). + TargetArn *string `locationName:"targetArn" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) GoString() string { + return s.String() +} + +// SetAccessPolicy sets the AccessPolicy field's value. +func (s *Destination) SetAccessPolicy(v string) *Destination { + s.AccessPolicy = &v + return s +} + +// SetArn sets the Arn field's value. 
+func (s *Destination) SetArn(v string) *Destination { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Destination) SetCreationTime(v int64) *Destination { + s.CreationTime = &v + return s +} + +// SetDestinationName sets the DestinationName field's value. +func (s *Destination) SetDestinationName(v string) *Destination { + s.DestinationName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *Destination) SetRoleArn(v string) *Destination { + s.RoleArn = &v + return s +} + +// SetTargetArn sets the TargetArn field's value. +func (s *Destination) SetTargetArn(v string) *Destination { + s.TargetArn = &v + return s +} + +type DisassociateKmsKeyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // In your DisassociateKmsKey operation, you must specify either the resourceIdentifier + // parameter or the logGroup parameter, but you can't specify both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Specifies the target for this operation. You must specify one of the following: + // + // * Specify the ARN of a log group to stop having CloudWatch Logs use the + // KMS key to encrypt log events that are ingested and stored by that log + // group. After you run this operation, CloudWatch Logs encrypts ingested + // log events with the default CloudWatch Logs method. The log group ARN + // must be in the following format. Replace REGION and ACCOUNT_ID with your + // Region and account ID. arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME + // + // * Specify the following ARN to stop using this key to encrypt the results + // of future StartQuery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) + // operations in this account. Replace REGION and ACCOUNT_ID with your Region + // and account ID. 
 arn:aws:logs:REGION:ACCOUNT_ID:query-result:*
+	//
+	// In your DisassociateKmsKey operation, you must specify either the resourceIdentifier
+	// parameter or the logGroup parameter, but you can't specify both.
+	ResourceIdentifier *string `locationName:"resourceIdentifier" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateKmsKeyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateKmsKeyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateKmsKeyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisassociateKmsKeyInput"}
+	if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
+	}
+	if s.ResourceIdentifier != nil && len(*s.ResourceIdentifier) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifier", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetLogGroupName sets the LogGroupName field's value.
+func (s *DisassociateKmsKeyInput) SetLogGroupName(v string) *DisassociateKmsKeyInput {
+	s.LogGroupName = &v
+	return s
+}
+
+// SetResourceIdentifier sets the ResourceIdentifier field's value.
+func (s *DisassociateKmsKeyInput) SetResourceIdentifier(v string) *DisassociateKmsKeyInput { + s.ResourceIdentifier = &v + return s +} + +type DisassociateKmsKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisassociateKmsKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisassociateKmsKeyOutput) GoString() string { + return s.String() +} + +type Entity struct { + _ struct{} `type:"structure"` + + Attributes map[string]*string `locationName:"attributes" type:"map"` + + KeyAttributes map[string]*string `locationName:"keyAttributes" min:"2" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Entity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Entity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Entity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Entity"} + if s.KeyAttributes != nil && len(s.KeyAttributes) < 2 { + invalidParams.Add(request.NewErrParamMinLen("KeyAttributes", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *Entity) SetAttributes(v map[string]*string) *Entity { + s.Attributes = v + return s +} + +// SetKeyAttributes sets the KeyAttributes field's value. +func (s *Entity) SetKeyAttributes(v map[string]*string) *Entity { + s.KeyAttributes = v + return s +} + +// Represents an export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket to which the log data was exported. + Destination *string `locationName:"destination" min:"1" type:"string"` + + // The prefix that was used as the start of Amazon S3 key for every object exported. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // Execution information about the export task. + ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` + + // The start time, expressed as the number of milliseconds after Jan 1, 1970 + // 00:00:00 UTC. Events with a timestamp before this time are not exported. + From *int64 `locationName:"from" type:"long"` + + // The name of the log group from which logs data was exported. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The status of the export task. + Status *ExportTaskStatus `locationName:"status" type:"structure"` + + // The ID of the export task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // The end time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 + // UTC. Events with a timestamp later than this time are not exported. 
+ To *int64 `locationName:"to" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTask) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *ExportTask) SetDestination(v string) *ExportTask { + s.Destination = &v + return s +} + +// SetDestinationPrefix sets the DestinationPrefix field's value. +func (s *ExportTask) SetDestinationPrefix(v string) *ExportTask { + s.DestinationPrefix = &v + return s +} + +// SetExecutionInfo sets the ExecutionInfo field's value. +func (s *ExportTask) SetExecutionInfo(v *ExportTaskExecutionInfo) *ExportTask { + s.ExecutionInfo = v + return s +} + +// SetFrom sets the From field's value. +func (s *ExportTask) SetFrom(v int64) *ExportTask { + s.From = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *ExportTask) SetLogGroupName(v string) *ExportTask { + s.LogGroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportTask) SetStatus(v *ExportTaskStatus) *ExportTask { + s.Status = v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *ExportTask) SetTaskId(v string) *ExportTask { + s.TaskId = &v + return s +} + +// SetTaskName sets the TaskName field's value. +func (s *ExportTask) SetTaskName(v string) *ExportTask { + s.TaskName = &v + return s +} + +// SetTo sets the To field's value. 
+func (s *ExportTask) SetTo(v int64) *ExportTask { + s.To = &v + return s +} + +// Represents the status of an export task. +type ExportTaskExecutionInfo struct { + _ struct{} `type:"structure"` + + // The completion time of the export task, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CompletionTime *int64 `locationName:"completionTime" type:"long"` + + // The creation time of the export task, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTaskExecutionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTaskExecutionInfo) GoString() string { + return s.String() +} + +// SetCompletionTime sets the CompletionTime field's value. +func (s *ExportTaskExecutionInfo) SetCompletionTime(v int64) *ExportTaskExecutionInfo { + s.CompletionTime = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ExportTaskExecutionInfo) SetCreationTime(v int64) *ExportTaskExecutionInfo { + s.CreationTime = &v + return s +} + +// Represents the status of an export task. +type ExportTaskStatus struct { + _ struct{} `type:"structure"` + + // The status code of the export task. + Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` + + // The status message related to the status code. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTaskStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTaskStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ExportTaskStatus) SetCode(v string) *ExportTaskStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ExportTaskStatus) SetMessage(v string) *ExportTaskStatus { + s.Message = &v + return s +} + +type FilterLogEventsInput struct { + _ struct{} `type:"structure"` + + // The end of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are + // not returned. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The filter pattern to use. For more information, see Filter and Pattern Syntax + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). + // + // If not provided, all the events are matched. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // If the value is true, the operation attempts to provide responses that contain + // events from multiple log streams within the log group, interleaved in a single + // response. If the value is false, all the matched log events in the first + // log stream are searched first, then those in the next log stream, and so + // on. 
+ // + // Important As of June 17, 2019, this parameter is ignored and the value is + // assumed to be true. The response from this operation always interleaves events + // from multiple log streams within a log group. + // + // Deprecated: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group. + Interleaved *bool `locationName:"interleaved" deprecated:"true" type:"boolean"` + + // The maximum number of events to return. The default is 10,000 events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // Specify either the name or ARN of the log group to view log events from. + // If the log group is in a source account and you are using a monitoring account, + // you must use the log group ARN. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The name of the log group to search. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Filters the results to include only events from log streams that have names + // starting with this prefix. + // + // If you specify a value for both logStreamNamePrefix and logStreamNames, the + // action returns an InvalidParameterException error. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // Filters the results to only logs from the log streams in this list. + // + // If you specify a value for both logStreamNames and logStreamNamePrefix, the + // action returns an InvalidParameterException error. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // The token for the next set of events to return. (You received this token + // from a previous call.) 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The start of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not + // returned. + StartTime *int64 `locationName:"startTime" type:"long"` + + // Specify true to display the log event fields with all sensitive data unmasked + // and visible. The default is false. + // + // To use this operation with this parameter, you must be signed into an account + // with the logs:Unmask permission. + Unmask *bool `locationName:"unmask" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FilterLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *FilterLogEventsInput) SetEndTime(v int64) *FilterLogEventsInput { + s.EndTime = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *FilterLogEventsInput) SetFilterPattern(v string) *FilterLogEventsInput { + s.FilterPattern = &v + return s +} + +// SetInterleaved sets the Interleaved field's value. +func (s *FilterLogEventsInput) SetInterleaved(v bool) *FilterLogEventsInput { + s.Interleaved = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *FilterLogEventsInput) SetLimit(v int64) *FilterLogEventsInput { + s.Limit = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *FilterLogEventsInput) SetLogGroupIdentifier(v string) *FilterLogEventsInput { + s.LogGroupIdentifier = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *FilterLogEventsInput) SetLogGroupName(v string) *FilterLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. +func (s *FilterLogEventsInput) SetLogStreamNamePrefix(v string) *FilterLogEventsInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetLogStreamNames sets the LogStreamNames field's value. +func (s *FilterLogEventsInput) SetLogStreamNames(v []*string) *FilterLogEventsInput { + s.LogStreamNames = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *FilterLogEventsInput) SetNextToken(v string) *FilterLogEventsInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *FilterLogEventsInput) SetStartTime(v int64) *FilterLogEventsInput { + s.StartTime = &v + return s +} + +// SetUnmask sets the Unmask field's value. +func (s *FilterLogEventsInput) SetUnmask(v bool) *FilterLogEventsInput { + s.Unmask = &v + return s +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The matched events. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // The token to use when requesting the next set of items. The token expires + // after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Important As of May 15, 2020, this parameter is no longer supported. This + // parameter returns an empty list. + // + // Indicates which log streams have been searched and whether each has been + // searched completely. + SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *FilterLogEventsOutput) SetEvents(v []*FilteredLogEvent) *FilterLogEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *FilterLogEventsOutput) SetNextToken(v string) *FilterLogEventsOutput { + s.NextToken = &v + return s +} + +// SetSearchedLogStreams sets the SearchedLogStreams field's value. +func (s *FilterLogEventsOutput) SetSearchedLogStreams(v []*SearchedLogStream) *FilterLogEventsOutput { + s.SearchedLogStreams = v + return s +} + +// Represents a matched event. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // The ID of the event. + EventId *string `locationName:"eventId" type:"string"` + + // The time the event was ingested, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream to which this event belongs. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +// SetEventId sets the EventId field's value. +func (s *FilteredLogEvent) SetEventId(v string) *FilteredLogEvent { + s.EventId = &v + return s +} + +// SetIngestionTime sets the IngestionTime field's value. +func (s *FilteredLogEvent) SetIngestionTime(v int64) *FilteredLogEvent { + s.IngestionTime = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *FilteredLogEvent) SetLogStreamName(v string) *FilteredLogEvent { + s.LogStreamName = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *FilteredLogEvent) SetMessage(v string) *FilteredLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *FilteredLogEvent) SetTimestamp(v int64) *FilteredLogEvent { + s.Timestamp = &v + return s +} + +type GetDataProtectionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name or ARN of the log group that contains the data protection policy + // that you want to see. + // + // LogGroupIdentifier is a required field + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetDataProtectionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataProtectionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataProtectionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataProtectionPolicyInput"} + if s.LogGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupIdentifier")) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *GetDataProtectionPolicyInput) SetLogGroupIdentifier(v string) *GetDataProtectionPolicyInput { + s.LogGroupIdentifier = &v + return s +} + +type GetDataProtectionPolicyOutput struct { + _ struct{} `type:"structure"` + + // The date and time that this policy was most recently updated. + LastUpdatedTime *int64 `locationName:"lastUpdatedTime" type:"long"` + + // The log group name or ARN that you specified in your request. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The data protection policy document for this log group. + PolicyDocument *string `locationName:"policyDocument" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetDataProtectionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataProtectionPolicyOutput) GoString() string { + return s.String() +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *GetDataProtectionPolicyOutput) SetLastUpdatedTime(v int64) *GetDataProtectionPolicyOutput { + s.LastUpdatedTime = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *GetDataProtectionPolicyOutput) SetLogGroupIdentifier(v string) *GetDataProtectionPolicyOutput { + s.LogGroupIdentifier = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *GetDataProtectionPolicyOutput) SetPolicyDocument(v string) *GetDataProtectionPolicyOutput { + s.PolicyDocument = &v + return s +} + +type GetDeliveryDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery destination that you want to retrieve. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetDeliveryDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeliveryDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeliveryDestinationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetDeliveryDestinationInput) SetName(v string) *GetDeliveryDestinationInput { + s.Name = &v + return s +} + +type GetDeliveryDestinationOutput struct { + _ struct{} `type:"structure"` + + // A structure containing information about the delivery destination. + DeliveryDestination *DeliveryDestination `locationName:"deliveryDestination" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationOutput) GoString() string { + return s.String() +} + +// SetDeliveryDestination sets the DeliveryDestination field's value. 
+func (s *GetDeliveryDestinationOutput) SetDeliveryDestination(v *DeliveryDestination) *GetDeliveryDestinationOutput { + s.DeliveryDestination = v + return s +} + +type GetDeliveryDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery destination that you want to retrieve the policy + // of. + // + // DeliveryDestinationName is a required field + DeliveryDestinationName *string `locationName:"deliveryDestinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeliveryDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeliveryDestinationPolicyInput"} + if s.DeliveryDestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationName")) + } + if s.DeliveryDestinationName != nil && len(*s.DeliveryDestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryDestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryDestinationName sets the DeliveryDestinationName field's value. 
+func (s *GetDeliveryDestinationPolicyInput) SetDeliveryDestinationName(v string) *GetDeliveryDestinationPolicyInput { + s.DeliveryDestinationName = &v + return s +} + +type GetDeliveryDestinationPolicyOutput struct { + _ struct{} `type:"structure"` + + // The IAM policy for this delivery destination. + Policy *Policy `locationName:"policy" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryDestinationPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetDeliveryDestinationPolicyOutput) SetPolicy(v *Policy) *GetDeliveryDestinationPolicyOutput { + s.Policy = v + return s +} + +type GetDeliveryInput struct { + _ struct{} `type:"structure"` + + // The ID of the delivery that you want to retrieve. + // + // Id is a required field + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeliveryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeliveryInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *GetDeliveryInput) SetId(v string) *GetDeliveryInput { + s.Id = &v + return s +} + +type GetDeliveryOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about the delivery. + Delivery *Delivery `locationName:"delivery" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliveryOutput) GoString() string { + return s.String() +} + +// SetDelivery sets the Delivery field's value. +func (s *GetDeliveryOutput) SetDelivery(v *Delivery) *GetDeliveryOutput { + s.Delivery = v + return s +} + +type GetDeliverySourceInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery source that you want to retrieve. 
+ // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliverySourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliverySourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeliverySourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeliverySourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetDeliverySourceInput) SetName(v string) *GetDeliverySourceInput { + s.Name = &v + return s +} + +type GetDeliverySourceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing information about the delivery source. + DeliverySource *DeliverySource `locationName:"deliverySource" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetDeliverySourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDeliverySourceOutput) GoString() string { + return s.String() +} + +// SetDeliverySource sets the DeliverySource field's value. +func (s *GetDeliverySourceOutput) SetDeliverySource(v *DeliverySource) *GetDeliverySourceOutput { + s.DeliverySource = v + return s +} + +type GetLogAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The ARN of the anomaly detector to retrieve information about. You can find + // the ARNs of log anomaly detectors in your account by using the ListLogAnomalyDetectors + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListLogAnomalyDetectors.html) + // operation. + // + // AnomalyDetectorArn is a required field + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetLogAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogAnomalyDetectorInput"} + if s.AnomalyDetectorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AnomalyDetectorArn")) + } + if s.AnomalyDetectorArn != nil && len(*s.AnomalyDetectorArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyDetectorArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *GetLogAnomalyDetectorInput) SetAnomalyDetectorArn(v string) *GetLogAnomalyDetectorInput { + s.AnomalyDetectorArn = &v + return s +} + +type GetLogAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the anomaly detector is currently active. To change its + // status, use the enabled parameter in the UpdateLogAnomalyDetector (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateLogAnomalyDetector.html) + // operation. + AnomalyDetectorStatus *string `locationName:"anomalyDetectorStatus" type:"string" enum:"AnomalyDetectorStatus"` + + // The number of days used as the life cycle of anomalies. After this time, + // anomalies are automatically baselined and the anomaly detector model will + // treat new occurrences of similar event as normal. + AnomalyVisibilityTime *int64 `locationName:"anomalyVisibilityTime" min:"7" type:"long"` + + // The date and time when this anomaly detector was created. + CreationTimeStamp *int64 `locationName:"creationTimeStamp" type:"long"` + + // The name of the log anomaly detector + DetectorName *string `locationName:"detectorName" min:"1" type:"string"` + + // Specifies how often the anomaly detector runs and look for anomalies. Set + // this value according to the frequency that the log group receives new logs. 
+ // For example, if the log group receives new log events every 10 minutes, then + // setting evaluationFrequency to FIFTEEN_MIN might be appropriate. + EvaluationFrequency *string `locationName:"evaluationFrequency" type:"string" enum:"EvaluationFrequency"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The ID of the KMS key assigned to this anomaly detector, if any. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The date and time when this anomaly detector was most recently modified. + LastModifiedTimeStamp *int64 `locationName:"lastModifiedTimeStamp" type:"long"` + + // An array of structures, where each structure contains the ARN of a log group + // associated with this anomaly detector. + LogGroupArnList []*string `locationName:"logGroupArnList" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogAnomalyDetectorOutput) GoString() string { + return s.String() +} + +// SetAnomalyDetectorStatus sets the AnomalyDetectorStatus field's value. 
+func (s *GetLogAnomalyDetectorOutput) SetAnomalyDetectorStatus(v string) *GetLogAnomalyDetectorOutput { + s.AnomalyDetectorStatus = &v + return s +} + +// SetAnomalyVisibilityTime sets the AnomalyVisibilityTime field's value. +func (s *GetLogAnomalyDetectorOutput) SetAnomalyVisibilityTime(v int64) *GetLogAnomalyDetectorOutput { + s.AnomalyVisibilityTime = &v + return s +} + +// SetCreationTimeStamp sets the CreationTimeStamp field's value. +func (s *GetLogAnomalyDetectorOutput) SetCreationTimeStamp(v int64) *GetLogAnomalyDetectorOutput { + s.CreationTimeStamp = &v + return s +} + +// SetDetectorName sets the DetectorName field's value. +func (s *GetLogAnomalyDetectorOutput) SetDetectorName(v string) *GetLogAnomalyDetectorOutput { + s.DetectorName = &v + return s +} + +// SetEvaluationFrequency sets the EvaluationFrequency field's value. +func (s *GetLogAnomalyDetectorOutput) SetEvaluationFrequency(v string) *GetLogAnomalyDetectorOutput { + s.EvaluationFrequency = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *GetLogAnomalyDetectorOutput) SetFilterPattern(v string) *GetLogAnomalyDetectorOutput { + s.FilterPattern = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *GetLogAnomalyDetectorOutput) SetKmsKeyId(v string) *GetLogAnomalyDetectorOutput { + s.KmsKeyId = &v + return s +} + +// SetLastModifiedTimeStamp sets the LastModifiedTimeStamp field's value. +func (s *GetLogAnomalyDetectorOutput) SetLastModifiedTimeStamp(v int64) *GetLogAnomalyDetectorOutput { + s.LastModifiedTimeStamp = &v + return s +} + +// SetLogGroupArnList sets the LogGroupArnList field's value. +func (s *GetLogAnomalyDetectorOutput) SetLogGroupArnList(v []*string) *GetLogAnomalyDetectorOutput { + s.LogGroupArnList = v + return s +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // The end of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. 
Events with a timestamp equal to or later than + // this time are not included. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned. If you don't specify a limit, + // the default is as many log events as can fit in a response size of 1 MB (up + // to 10,000 log events). + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // Specify either the name or ARN of the log group to view events from. If the + // log group is in a source account and you are using a monitoring account, + // you must use the log group ARN. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The name of the log group. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If the value is true, the earliest log events are returned first. If the + // value is false, the latest log events are returned first. The default value + // is false. + // + // If you are using a previous nextForwardToken value as the nextToken in this + // operation, you must specify true for startFromHead. + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // The start of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to this time or later + // than this time are included. Events with a timestamp earlier than this time + // are not included. 
+ StartTime *int64 `locationName:"startTime" type:"long"` + + // Specify true to display the log event fields with all sensitive data unmasked + // and visible. The default is false. + // + // To use this operation with this parameter, you must be signed into an account + // with the logs:Unmask permission. + Unmask *bool `locationName:"unmask" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *GetLogEventsInput) SetEndTime(v int64) *GetLogEventsInput { + s.EndTime = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *GetLogEventsInput) SetLimit(v int64) *GetLogEventsInput { + s.Limit = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *GetLogEventsInput) SetLogGroupIdentifier(v string) *GetLogEventsInput { + s.LogGroupIdentifier = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *GetLogEventsInput) SetLogGroupName(v string) *GetLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *GetLogEventsInput) SetLogStreamName(v string) *GetLogEventsInput { + s.LogStreamName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLogEventsInput) SetNextToken(v string) *GetLogEventsInput { + s.NextToken = &v + return s +} + +// SetStartFromHead sets the StartFromHead field's value. 
+func (s *GetLogEventsInput) SetStartFromHead(v bool) *GetLogEventsInput { + s.StartFromHead = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetLogEventsInput) SetStartTime(v int64) *GetLogEventsInput { + s.StartTime = &v + return s +} + +// SetUnmask sets the Unmask field's value. +func (s *GetLogEventsInput) SetUnmask(v bool) *GetLogEventsInput { + s.Unmask = &v + return s +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The events. + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // The token for the next set of items in the backward direction. The token + // expires after 24 hours. This token is not null. If you have reached the end + // of the stream, it returns the same token you passed in. + NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` + + // The token for the next set of items in the forward direction. The token expires + // after 24 hours. If you have reached the end of the stream, it returns the + // same token you passed in. + NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. 
+func (s *GetLogEventsOutput) SetEvents(v []*OutputLogEvent) *GetLogEventsOutput { + s.Events = v + return s +} + +// SetNextBackwardToken sets the NextBackwardToken field's value. +func (s *GetLogEventsOutput) SetNextBackwardToken(v string) *GetLogEventsOutput { + s.NextBackwardToken = &v + return s +} + +// SetNextForwardToken sets the NextForwardToken field's value. +func (s *GetLogEventsOutput) SetNextForwardToken(v string) *GetLogEventsOutput { + s.NextForwardToken = &v + return s +} + +type GetLogGroupFieldsInput struct { + _ struct{} `type:"structure"` + + // Specify either the name or ARN of the log group to view. If the log group + // is in a source account and you are using a monitoring account, you must specify + // the ARN. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The name of the log group to search. + // + // You must include either logGroupIdentifier or logGroupName, but not both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The time to set as the center of the query. If you specify time, the 8 minutes + // before and 8 minutes after this time are searched. If you omit time, the + // most recent 15 minutes up to the current time are searched. + // + // The time value is specified as epoch time, which is the number of seconds + // since January 1, 1970, 00:00:00 UTC. + Time *int64 `locationName:"time" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogGroupFieldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogGroupFieldsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogGroupFieldsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogGroupFieldsInput"} + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *GetLogGroupFieldsInput) SetLogGroupIdentifier(v string) *GetLogGroupFieldsInput { + s.LogGroupIdentifier = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *GetLogGroupFieldsInput) SetLogGroupName(v string) *GetLogGroupFieldsInput { + s.LogGroupName = &v + return s +} + +// SetTime sets the Time field's value. +func (s *GetLogGroupFieldsInput) SetTime(v int64) *GetLogGroupFieldsInput { + s.Time = &v + return s +} + +type GetLogGroupFieldsOutput struct { + _ struct{} `type:"structure"` + + // The array of fields found in the query. Each object in the array contains + // the name of the field, along with the percentage of time it appeared in the + // log events that were queried. + LogGroupFields []*LogGroupField `locationName:"logGroupFields" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetLogGroupFieldsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogGroupFieldsOutput) GoString() string { + return s.String() +} + +// SetLogGroupFields sets the LogGroupFields field's value. +func (s *GetLogGroupFieldsOutput) SetLogGroupFields(v []*LogGroupField) *GetLogGroupFieldsOutput { + s.LogGroupFields = v + return s +} + +type GetLogRecordInput struct { + _ struct{} `type:"structure"` + + // The pointer corresponding to the log event record you want to retrieve. You + // get this from the response of a GetQueryResults operation. In that response, + // the value of the @ptr field for a log event is the value to use as logRecordPointer + // to retrieve that complete log event record. + // + // LogRecordPointer is a required field + LogRecordPointer *string `locationName:"logRecordPointer" type:"string" required:"true"` + + // Specify true to display the log event fields with all sensitive data unmasked + // and visible. The default is false. + // + // To use this operation with this parameter, you must be signed into an account + // with the logs:Unmask permission. + Unmask *bool `locationName:"unmask" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogRecordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogRecordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogRecordInput"} + if s.LogRecordPointer == nil { + invalidParams.Add(request.NewErrParamRequired("LogRecordPointer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogRecordPointer sets the LogRecordPointer field's value. +func (s *GetLogRecordInput) SetLogRecordPointer(v string) *GetLogRecordInput { + s.LogRecordPointer = &v + return s +} + +// SetUnmask sets the Unmask field's value. +func (s *GetLogRecordInput) SetUnmask(v bool) *GetLogRecordInput { + s.Unmask = &v + return s +} + +type GetLogRecordOutput struct { + _ struct{} `type:"structure"` + + // The requested log event, as a JSON string. + LogRecord map[string]*string `locationName:"logRecord" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetLogRecordOutput) GoString() string { + return s.String() +} + +// SetLogRecord sets the LogRecord field's value. 
+func (s *GetLogRecordOutput) SetLogRecord(v map[string]*string) *GetLogRecordOutput { + s.LogRecord = v + return s +} + +type GetQueryResultsInput struct { + _ struct{} `type:"structure"` + + // The ID number of the query. + // + // QueryId is a required field + QueryId *string `locationName:"queryId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueryResultsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} + if s.QueryId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryId sets the QueryId field's value. +func (s *GetQueryResultsInput) SetQueryId(v string) *GetQueryResultsInput { + s.QueryId = &v + return s +} + +type GetQueryResultsOutput struct { + _ struct{} `type:"structure"` + + // If you associated an KMS key with the CloudWatch Logs Insights query results + // in this account, this field displays the ARN of the key that's used to encrypt + // the query results when StartQuery (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) + // stores them. 
+ EncryptionKey *string `locationName:"encryptionKey" type:"string"` + + // The log events that matched the query criteria during the most recent time + // it ran. + // + // The results value is an array of arrays. Each log event is one object in + // the top-level array. Each of these log event objects is an array of field/value + // pairs. + Results [][]*ResultField `locationName:"results" type:"list"` + + // Includes the number of log events scanned by the query, the number of log + // events that matched the query criteria, and the total number of bytes in + // the scanned log events. These values reflect the full raw results of the + // query. + Statistics *QueryStatistics `locationName:"statistics" type:"structure"` + + // The status of the most recent running of the query. Possible values are Cancelled, + // Complete, Failed, Running, Scheduled, Timeout, and Unknown. + // + // Queries time out after 60 minutes of runtime. To avoid having your queries + // time out, reduce the time range being searched or partition your query into + // a number of queries. + Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsOutput) GoString() string { + return s.String() +} + +// SetEncryptionKey sets the EncryptionKey field's value. 
+func (s *GetQueryResultsOutput) SetEncryptionKey(v string) *GetQueryResultsOutput { + s.EncryptionKey = &v + return s +} + +// SetResults sets the Results field's value. +func (s *GetQueryResultsOutput) SetResults(v [][]*ResultField) *GetQueryResultsOutput { + s.Results = v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *GetQueryResultsOutput) SetStatistics(v *QueryStatistics) *GetQueryResultsOutput { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetQueryResultsOutput) SetStatus(v string) *GetQueryResultsOutput { + s.Status = &v + return s +} + +// Represents a log event, which is a record of activity that was recorded by +// the application or resource being monitored. +type InputLogEvent struct { + _ struct{} `type:"structure"` + + // The raw event message. Each log event can be no larger than 256 KB. + // + // Message is a required field + Message *string `locationName:"message" min:"1" type:"string" required:"true"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + // + // Timestamp is a required field + Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputLogEvent) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InputLogEvent) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Message != nil && len(*s.Message) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Message", 1)) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMessage sets the Message field's value. +func (s *InputLogEvent) SetMessage(v string) *InputLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *InputLogEvent) SetTimestamp(v int64) *InputLogEvent { + s.Timestamp = &v + return s +} + +// The operation is not valid on the specified resource. +type InvalidOperationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidOperationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidOperationException) GoString() string { + return s.String() +} + +func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { + return &InvalidOperationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidOperationException) Code() string { + return "InvalidOperationException" +} + +// Message returns the exception's message. +func (s *InvalidOperationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidOperationException) OrigErr() error { + return nil +} + +func (s *InvalidOperationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A parameter is specified incorrectly. +type InvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { + return &InvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidParameterException) Code() string { + return "InvalidParameterException" +} + +// Message returns the exception's message. +func (s *InvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidParameterException) OrigErr() error { + return nil +} + +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The sequence token is not valid. You can get the correct sequence token in +// the expectedSequenceToken field in the InvalidSequenceTokenException message. +// +// PutLogEvents actions are now always accepted and never return InvalidSequenceTokenException +// regardless of receiving an invalid sequence token. +type InvalidSequenceTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidSequenceTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidSequenceTokenException) GoString() string { + return s.String() +} + +func newErrorInvalidSequenceTokenException(v protocol.ResponseMetadata) error { + return &InvalidSequenceTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidSequenceTokenException) Code() string { + return "InvalidSequenceTokenException" +} + +// Message returns the exception's message. +func (s *InvalidSequenceTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidSequenceTokenException) OrigErr() error { + return nil +} + +func (s *InvalidSequenceTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidSequenceTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidSequenceTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You have reached the maximum number of resources that can be created. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LimitExceededException) GoString() string { + return s.String() +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAnomaliesInput struct { + _ struct{} `type:"structure"` + + // Use this to optionally limit the results to only the anomalies found by a + // certain anomaly detector. + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string"` + + // The maximum number of items to return. If you don't specify a value, the + // default maximum value of 50 items is used. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // You can specify this parameter if you want to the operation to return only + // anomalies that are currently either suppressed or unsuppressed. + SuppressionState *string `locationName:"suppressionState" type:"string" enum:"SuppressionState"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAnomaliesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAnomaliesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAnomaliesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAnomaliesInput"} + if s.AnomalyDetectorArn != nil && len(*s.AnomalyDetectorArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyDetectorArn", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *ListAnomaliesInput) SetAnomalyDetectorArn(v string) *ListAnomaliesInput { + s.AnomalyDetectorArn = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *ListAnomaliesInput) SetLimit(v int64) *ListAnomaliesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAnomaliesInput) SetNextToken(v string) *ListAnomaliesInput { + s.NextToken = &v + return s +} + +// SetSuppressionState sets the SuppressionState field's value. +func (s *ListAnomaliesInput) SetSuppressionState(v string) *ListAnomaliesInput { + s.SuppressionState = &v + return s +} + +type ListAnomaliesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures, where each structure contains information about one + // anomaly that a log anomaly detector has found. + Anomalies []*Anomaly `locationName:"anomalies" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAnomaliesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAnomaliesOutput) GoString() string { + return s.String() +} + +// SetAnomalies sets the Anomalies field's value. +func (s *ListAnomaliesOutput) SetAnomalies(v []*Anomaly) *ListAnomaliesOutput { + s.Anomalies = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListAnomaliesOutput) SetNextToken(v string) *ListAnomaliesOutput { + s.NextToken = &v + return s +} + +type ListLogAnomalyDetectorsInput struct { + _ struct{} `type:"structure"` + + // Use this to optionally filter the results to only include anomaly detectors + // that are associated with the specified log group. + FilterLogGroupArn *string `locationName:"filterLogGroupArn" min:"1" type:"string"` + + // The maximum number of items to return. If you don't specify a value, the + // default maximum value of 50 items is used. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListLogAnomalyDetectorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListLogAnomalyDetectorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListLogAnomalyDetectorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLogAnomalyDetectorsInput"} + if s.FilterLogGroupArn != nil && len(*s.FilterLogGroupArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterLogGroupArn", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterLogGroupArn sets the FilterLogGroupArn field's value. +func (s *ListLogAnomalyDetectorsInput) SetFilterLogGroupArn(v string) *ListLogAnomalyDetectorsInput { + s.FilterLogGroupArn = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListLogAnomalyDetectorsInput) SetLimit(v int64) *ListLogAnomalyDetectorsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListLogAnomalyDetectorsInput) SetNextToken(v string) *ListLogAnomalyDetectorsInput { + s.NextToken = &v + return s +} + +type ListLogAnomalyDetectorsOutput struct { + _ struct{} `type:"structure"` + + // An array of structures, where each structure in the array contains information + // about one anomaly detector. + AnomalyDetectors []*AnomalyDetector `locationName:"anomalyDetectors" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListLogAnomalyDetectorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListLogAnomalyDetectorsOutput) GoString() string { + return s.String() +} + +// SetAnomalyDetectors sets the AnomalyDetectors field's value. +func (s *ListLogAnomalyDetectorsOutput) SetAnomalyDetectors(v []*AnomalyDetector) *ListLogAnomalyDetectorsOutput { + s.AnomalyDetectors = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListLogAnomalyDetectorsOutput) SetNextToken(v string) *ListLogAnomalyDetectorsOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource that you want to view tags for. + // + // The ARN format of a log group is arn:aws:logs:Region:account-id:log-group:log-group-name + // + // The ARN format of a destination is arn:aws:logs:Region:account-id:destination:destination-name + // + // For more information about ARN format, see CloudWatch Logs resources and + // operations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html). + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The list of tags associated with the requested resource.> + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
+func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Deprecated: Please use the generic tagging API model ListTagsForResourceRequest and ListTagsForResourceResponse +type ListTagsLogGroupInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *ListTagsLogGroupInput) SetLogGroupName(v string) *ListTagsLogGroupInput { + s.LogGroupName = &v + return s +} + +// Deprecated: Please use the generic tagging API model ListTagsForResourceRequest and ListTagsForResourceResponse +type ListTagsLogGroupOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The tags for the log group. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsLogGroupOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsLogGroupOutput) SetTags(v map[string]*string) *ListTagsLogGroupOutput { + s.Tags = v + return s +} + +// This object contains the information for one log event returned in a Live +// Tail stream. +type LiveTailSessionLogEvent struct { + _ struct{} `type:"structure"` + + // The timestamp specifying when this log event was ingested into the log group. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name or ARN of the log group that ingested this log event. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The name of the log stream that ingested this log event. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The log event message text. 
+ Message *string `locationName:"message" min:"1" type:"string"` + + // The timestamp specifying when this log event was created. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LiveTailSessionLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LiveTailSessionLogEvent) GoString() string { + return s.String() +} + +// SetIngestionTime sets the IngestionTime field's value. +func (s *LiveTailSessionLogEvent) SetIngestionTime(v int64) *LiveTailSessionLogEvent { + s.IngestionTime = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *LiveTailSessionLogEvent) SetLogGroupIdentifier(v string) *LiveTailSessionLogEvent { + s.LogGroupIdentifier = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *LiveTailSessionLogEvent) SetLogStreamName(v string) *LiveTailSessionLogEvent { + s.LogStreamName = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *LiveTailSessionLogEvent) SetMessage(v string) *LiveTailSessionLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *LiveTailSessionLogEvent) SetTimestamp(v int64) *LiveTailSessionLogEvent { + s.Timestamp = &v + return s +} + +// This object contains the metadata for one LiveTailSessionUpdate structure. 
+// It indicates whether that update includes only a sample of 500 log events +// out of a larger number of ingested log events, or if it contains all of the +// matching log events ingested during that second of time. +type LiveTailSessionMetadata struct { + _ struct{} `type:"structure"` + + // If this is true, then more than 500 log events matched the request for this + // update, and the sessionResults includes a sample of 500 of those events. + // + // If this is false, then 500 or fewer log events matched the request for this + // update, so no sampling was necessary. In this case, the sessionResults array + // includes all log events that matched your request during this time. + Sampled *bool `locationName:"sampled" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LiveTailSessionMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LiveTailSessionMetadata) GoString() string { + return s.String() +} + +// SetSampled sets the Sampled field's value. +func (s *LiveTailSessionMetadata) SetSampled(v bool) *LiveTailSessionMetadata { + s.Sampled = &v + return s +} + +// This object contains information about this Live Tail session, including +// the log groups included and the log stream filters, if any. +type LiveTailSessionStart struct { + _ struct{} `type:"structure"` + + // An optional pattern to filter the results to include only log events that + // match the pattern. 
For example, a filter pattern of error 404 displays only + // log events that include both error and 404. + // + // For more information about filter pattern syntax, see Filter and Pattern + // Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). + LogEventFilterPattern *string `locationName:"logEventFilterPattern" type:"string"` + + // An array of the names and ARNs of the log groups included in this Live Tail + // session. + LogGroupIdentifiers []*string `locationName:"logGroupIdentifiers" min:"1" type:"list"` + + // If your StartLiveTail operation request included a logStreamNamePrefixes + // parameter that filtered the session to only include log streams that have + // names that start with certain prefixes, these prefixes are listed here. + LogStreamNamePrefixes []*string `locationName:"logStreamNamePrefixes" min:"1" type:"list"` + + // If your StartLiveTail operation request included a logStreamNames parameter + // that filtered the session to only include certain log streams, these streams + // are listed here. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // The unique ID generated by CloudWatch Logs to identify this Live Tail session + // request. + RequestId *string `locationName:"requestId" type:"string"` + + // The unique ID generated by CloudWatch Logs to identify this Live Tail session. + SessionId *string `locationName:"sessionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LiveTailSessionStart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LiveTailSessionStart) GoString() string {
+	return s.String()
+}
+
+// SetLogEventFilterPattern sets the LogEventFilterPattern field's value.
+func (s *LiveTailSessionStart) SetLogEventFilterPattern(v string) *LiveTailSessionStart {
+	s.LogEventFilterPattern = &v
+	return s
+}
+
+// SetLogGroupIdentifiers sets the LogGroupIdentifiers field's value.
+func (s *LiveTailSessionStart) SetLogGroupIdentifiers(v []*string) *LiveTailSessionStart {
+	s.LogGroupIdentifiers = v
+	return s
+}
+
+// SetLogStreamNamePrefixes sets the LogStreamNamePrefixes field's value.
+func (s *LiveTailSessionStart) SetLogStreamNamePrefixes(v []*string) *LiveTailSessionStart {
+	s.LogStreamNamePrefixes = v
+	return s
+}
+
+// SetLogStreamNames sets the LogStreamNames field's value.
+func (s *LiveTailSessionStart) SetLogStreamNames(v []*string) *LiveTailSessionStart {
+	s.LogStreamNames = v
+	return s
+}
+
+// SetRequestId sets the RequestId field's value.
+func (s *LiveTailSessionStart) SetRequestId(v string) *LiveTailSessionStart {
+	s.RequestId = &v
+	return s
+}
+
+// SetSessionId sets the SessionId field's value.
+func (s *LiveTailSessionStart) SetSessionId(v string) *LiveTailSessionStart {
+	s.SessionId = &v
+	return s
+}
+
+// The LiveTailSessionStart is an event in the StartLiveTailResponseStream group of events.
+func (s *LiveTailSessionStart) eventStartLiveTailResponseStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the LiveTailSessionStart value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *LiveTailSessionStart) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value.
This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *LiveTailSessionStart) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	var buf bytes.Buffer
+	if err = pm.MarshalPayload(&buf, s); err != nil {
+		return eventstream.Message{}, err
+	}
+	msg.Payload = buf.Bytes()
+	return msg, err
+}
+
+// This object contains the log events and metadata for a Live Tail session.
+type LiveTailSessionUpdate struct {
+	_ struct{} `type:"structure"`
+
+	// This object contains the session metadata for a Live Tail session.
+	SessionMetadata *LiveTailSessionMetadata `locationName:"sessionMetadata" type:"structure"`
+
+	// An array, where each member of the array includes the information for one
+	// log event in the Live Tail session.
+	//
+	// A sessionResults array can include as many as 500 log events. If the number
+	// of log events matching the request exceeds 500 per second, the log events
+	// are sampled down to 500 log events to be included in each sessionUpdate structure.
+	SessionResults []*LiveTailSessionLogEvent `locationName:"sessionResults" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LiveTailSessionUpdate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LiveTailSessionUpdate) GoString() string {
+	return s.String()
+}
+
+// SetSessionMetadata sets the SessionMetadata field's value.
+func (s *LiveTailSessionUpdate) SetSessionMetadata(v *LiveTailSessionMetadata) *LiveTailSessionUpdate { + s.SessionMetadata = v + return s +} + +// SetSessionResults sets the SessionResults field's value. +func (s *LiveTailSessionUpdate) SetSessionResults(v []*LiveTailSessionLogEvent) *LiveTailSessionUpdate { + s.SessionResults = v + return s +} + +// The LiveTailSessionUpdate is and event in the StartLiveTailResponseStream group of events. +func (s *LiveTailSessionUpdate) eventStartLiveTailResponseStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the LiveTailSessionUpdate value. +// This method is only used internally within the SDK's EventStream handling. +func (s *LiveTailSessionUpdate) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *LiveTailSessionUpdate) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// This structure contains the information for one sample log event that is +// associated with an anomaly found by a log anomaly detector. +type LogEvent struct { + _ struct{} `type:"structure"` + + // The message content of the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // The time stamp of the log event. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogEvent) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *LogEvent) SetMessage(v string) *LogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *LogEvent) SetTimestamp(v int64) *LogEvent { + s.Timestamp = &v + return s +} + +// Represents a log group. +type LogGroup struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the log group. This version of the ARN + // includes a trailing :* after the log group name. + // + // Use this version to refer to the ARN in IAM policies when specifying permissions + // for most API actions. The exception is when specifying permissions for TagResource + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html), + // UntagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html), + // and ListTagsForResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). + // The permissions for those three actions require the ARN version that doesn't + // include a trailing :*. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the log group, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. 
+ CreationTime *int64 `locationName:"creationTime" type:"long"` + + // Displays whether this log group has a protection policy, or whether it had + // one in the past. For more information, see PutDataProtectionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html). + DataProtectionStatus *string `locationName:"dataProtectionStatus" type:"string" enum:"DataProtectionStatus"` + + // Displays all the properties that this log group has inherited from account-level + // settings. + InheritedProperties []*string `locationName:"inheritedProperties" type:"list" enum:"InheritedProperty"` + + // The Amazon Resource Name (ARN) of the KMS key to use when encrypting log + // data. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The Amazon Resource Name (ARN) of the log group. This version of the ARN + // doesn't include a trailing :* after the log group name. + // + // Use this version to refer to the ARN in the following situations: + // + // * In the logGroupIdentifier input field in many CloudWatch Logs APIs. + // + // * In the resourceArn field in tagging APIs + // + // * In IAM policies, when specifying permissions for TagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html), + // UntagResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html), + // and ListTagsForResource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). + LogGroupArn *string `locationName:"logGroupArn" type:"string"` + + // This specifies the log group class for this log group. There are two classes: + // + // * The Standard log class supports all CloudWatch Logs features. + // + // * The Infrequent Access log class supports a subset of CloudWatch Logs + // features and incurs lower costs. 
+ // + // For details about the features supported by each class, see Log classes (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html) + LogGroupClass *string `locationName:"logGroupClass" type:"string" enum:"LogGroupClass"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The number of metric filters. + MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"` + + // The number of days to retain the log events in the specified log group. Possible + // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, + // 1096, 1827, 2192, 2557, 2922, 3288, and 3653. + // + // To set a log group so that its log events do not expire, use DeleteRetentionPolicy + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html). + RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"` + + // The number of bytes stored. + StoredBytes *int64 `locationName:"storedBytes" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogGroup) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LogGroup) SetArn(v string) *LogGroup { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *LogGroup) SetCreationTime(v int64) *LogGroup { + s.CreationTime = &v + return s +} + +// SetDataProtectionStatus sets the DataProtectionStatus field's value. +func (s *LogGroup) SetDataProtectionStatus(v string) *LogGroup { + s.DataProtectionStatus = &v + return s +} + +// SetInheritedProperties sets the InheritedProperties field's value. +func (s *LogGroup) SetInheritedProperties(v []*string) *LogGroup { + s.InheritedProperties = v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *LogGroup) SetKmsKeyId(v string) *LogGroup { + s.KmsKeyId = &v + return s +} + +// SetLogGroupArn sets the LogGroupArn field's value. +func (s *LogGroup) SetLogGroupArn(v string) *LogGroup { + s.LogGroupArn = &v + return s +} + +// SetLogGroupClass sets the LogGroupClass field's value. +func (s *LogGroup) SetLogGroupClass(v string) *LogGroup { + s.LogGroupClass = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *LogGroup) SetLogGroupName(v string) *LogGroup { + s.LogGroupName = &v + return s +} + +// SetMetricFilterCount sets the MetricFilterCount field's value. +func (s *LogGroup) SetMetricFilterCount(v int64) *LogGroup { + s.MetricFilterCount = &v + return s +} + +// SetRetentionInDays sets the RetentionInDays field's value. +func (s *LogGroup) SetRetentionInDays(v int64) *LogGroup { + s.RetentionInDays = &v + return s +} + +// SetStoredBytes sets the StoredBytes field's value. +func (s *LogGroup) SetStoredBytes(v int64) *LogGroup { + s.StoredBytes = &v + return s +} + +// The fields contained in log events found by a GetLogGroupFields operation, +// along with the percentage of queried log events in which each field appears. +type LogGroupField struct { + _ struct{} `type:"structure"` + + // The name of a log field. + Name *string `locationName:"name" type:"string"` + + // The percentage of log events queried that contained the field. 
+ Percent *int64 `locationName:"percent" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogGroupField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogGroupField) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LogGroupField) SetName(v string) *LogGroupField { + s.Name = &v + return s +} + +// SetPercent sets the Percent field's value. +func (s *LogGroupField) SetPercent(v int64) *LogGroupField { + s.Percent = &v + return s +} + +// Represents a log stream, which is a sequence of log events from a single +// emitter of logs. +type LogStream struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the log stream. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the stream, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The time of the first event, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"` + + // The time of the most recent log event in the log stream in CloudWatch Logs. + // This number is expressed as the number of milliseconds after Jan 1, 1970 + // 00:00:00 UTC. The lastEventTime value updates on an eventual consistency + // basis. It typically updates in less than an hour from ingestion, but in rare + // situations might take longer. 
+ LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` + + // The ingestion time, expressed as the number of milliseconds after Jan 1, + // 1970 00:00:00 UTC The lastIngestionTime value updates on an eventual consistency + // basis. It typically updates in less than an hour after ingestion, but in + // rare situations might take longer. + LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The number of bytes stored. + // + // Important: As of June 17, 2019, this parameter is no longer supported for + // log streams, and is always reported as zero. This change applies only to + // log streams. The storedBytes parameter for log groups is not affected. + // + // Deprecated: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. This change applies only to log streams. The storedBytes parameter for log groups is not affected. + StoredBytes *int64 `locationName:"storedBytes" deprecated:"true" type:"long"` + + // The sequence token. + // + // The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions + // are always accepted regardless of receiving an invalid sequence token. You + // don't need to obtain uploadSequenceToken to use a PutLogEvents action. + UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogStream) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LogStream) SetArn(v string) *LogStream { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *LogStream) SetCreationTime(v int64) *LogStream { + s.CreationTime = &v + return s +} + +// SetFirstEventTimestamp sets the FirstEventTimestamp field's value. +func (s *LogStream) SetFirstEventTimestamp(v int64) *LogStream { + s.FirstEventTimestamp = &v + return s +} + +// SetLastEventTimestamp sets the LastEventTimestamp field's value. +func (s *LogStream) SetLastEventTimestamp(v int64) *LogStream { + s.LastEventTimestamp = &v + return s +} + +// SetLastIngestionTime sets the LastIngestionTime field's value. +func (s *LogStream) SetLastIngestionTime(v int64) *LogStream { + s.LastIngestionTime = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *LogStream) SetLogStreamName(v string) *LogStream { + s.LogStreamName = &v + return s +} + +// SetStoredBytes sets the StoredBytes field's value. +func (s *LogStream) SetStoredBytes(v int64) *LogStream { + s.StoredBytes = &v + return s +} + +// SetUploadSequenceToken sets the UploadSequenceToken field's value. +func (s *LogStream) SetUploadSequenceToken(v string) *LogStream { + s.UploadSequenceToken = &v + return s +} + +// The query string is not valid. Details about this error are displayed in +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). +// +// For more information about valid query syntax, see CloudWatch Logs Insights +// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
+type MalformedQueryException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + // Reserved. + QueryCompileError *QueryCompileError `locationName:"queryCompileError" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MalformedQueryException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MalformedQueryException) GoString() string { + return s.String() +} + +func newErrorMalformedQueryException(v protocol.ResponseMetadata) error { + return &MalformedQueryException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *MalformedQueryException) Code() string { + return "MalformedQueryException" +} + +// Message returns the exception's message. +func (s *MalformedQueryException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MalformedQueryException) OrigErr() error { + return nil +} + +func (s *MalformedQueryException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MalformedQueryException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *MalformedQueryException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Metric filters express how CloudWatch Logs would extract metric observations +// from ingested log events and transform them into metric data in a CloudWatch +// metric. +type MetricFilter struct { + _ struct{} `type:"structure"` + + // The creation time of the metric filter, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The name of the metric filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The metric transformations. + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricFilter) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *MetricFilter) SetCreationTime(v int64) *MetricFilter { + s.CreationTime = &v + return s +} + +// SetFilterName sets the FilterName field's value. +func (s *MetricFilter) SetFilterName(v string) *MetricFilter { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *MetricFilter) SetFilterPattern(v string) *MetricFilter { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *MetricFilter) SetLogGroupName(v string) *MetricFilter { + s.LogGroupName = &v + return s +} + +// SetMetricTransformations sets the MetricTransformations field's value. +func (s *MetricFilter) SetMetricTransformations(v []*MetricTransformation) *MetricFilter { + s.MetricTransformations = v + return s +} + +// Represents a matched event. +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + // The raw event data. + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + // The event number. + EventNumber *int64 `locationName:"eventNumber" type:"long"` + + // The values extracted from the event data by the filter. + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +// SetEventMessage sets the EventMessage field's value. 
+func (s *MetricFilterMatchRecord) SetEventMessage(v string) *MetricFilterMatchRecord { + s.EventMessage = &v + return s +} + +// SetEventNumber sets the EventNumber field's value. +func (s *MetricFilterMatchRecord) SetEventNumber(v int64) *MetricFilterMatchRecord { + s.EventNumber = &v + return s +} + +// SetExtractedValues sets the ExtractedValues field's value. +func (s *MetricFilterMatchRecord) SetExtractedValues(v map[string]*string) *MetricFilterMatchRecord { + s.ExtractedValues = v + return s +} + +// Indicates how to transform ingested log events to metric data in a CloudWatch +// metric. +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // (Optional) The value to emit when a filter pattern does not match a log event. + // This value can be null. + DefaultValue *float64 `locationName:"defaultValue" type:"double"` + + // The fields to use as dimensions for the metric. One metric filter can include + // as many as three dimensions. + // + // Metrics extracted from log events are charged as custom metrics. To prevent + // unexpected high charges, do not specify high-cardinality fields such as IPAddress + // or requestID as dimensions. Each different value found for a dimension is + // treated as a separate metric and accrues charges as a separate custom metric. + // + // CloudWatch Logs disables a metric filter if it generates 1000 different name/value + // pairs for your specified dimensions within a certain amount of time. This + // helps to prevent accidental high charges. + // + // You can also set up a billing alarm to alert you if your charges are higher + // than expected. For more information, see Creating a Billing Alarm to Monitor + // Your Estimated Amazon Web Services Charges (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html). + Dimensions map[string]*string `locationName:"dimensions" type:"map"` + + // The name of the CloudWatch metric. 
+ // + // MetricName is a required field + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // A custom namespace to contain your metric in CloudWatch. Use namespaces to + // group together metrics that are similar. For more information, see Namespaces + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Namespace). + // + // MetricNamespace is a required field + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // The value to publish to the CloudWatch metric when a filter pattern matches + // a log event. + // + // MetricValue is a required field + MetricValue *string `locationName:"metricValue" type:"string" required:"true"` + + // The unit to assign to the metric. If you omit this, the unit is set as None. + Unit *string `locationName:"unit" type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricTransformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricTransformation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricTransformation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("MetricNamespace")) + } + if s.MetricValue == nil { + invalidParams.Add(request.NewErrParamRequired("MetricValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultValue sets the DefaultValue field's value. +func (s *MetricTransformation) SetDefaultValue(v float64) *MetricTransformation { + s.DefaultValue = &v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *MetricTransformation) SetDimensions(v map[string]*string) *MetricTransformation { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *MetricTransformation) SetMetricName(v string) *MetricTransformation { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *MetricTransformation) SetMetricNamespace(v string) *MetricTransformation { + s.MetricNamespace = &v + return s +} + +// SetMetricValue sets the MetricValue field's value. +func (s *MetricTransformation) SetMetricValue(v string) *MetricTransformation { + s.MetricValue = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *MetricTransformation) SetUnit(v string) *MetricTransformation { + s.Unit = &v + return s +} + +// Multiple concurrent requests to update the same resource were in conflict. +type OperationAbortedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s OperationAbortedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OperationAbortedException) GoString() string { + return s.String() +} + +func newErrorOperationAbortedException(v protocol.ResponseMetadata) error { + return &OperationAbortedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OperationAbortedException) Code() string { + return "OperationAbortedException" +} + +// Message returns the exception's message. +func (s *OperationAbortedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OperationAbortedException) OrigErr() error { + return nil +} + +func (s *OperationAbortedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OperationAbortedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OperationAbortedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents a log event. +type OutputLogEvent struct { + _ struct{} `type:"structure"` + + // The time the event was ingested, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The data contained in the log event. 
+ Message *string `locationName:"message" min:"1" type:"string"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLogEvent) GoString() string { + return s.String() +} + +// SetIngestionTime sets the IngestionTime field's value. +func (s *OutputLogEvent) SetIngestionTime(v int64) *OutputLogEvent { + s.IngestionTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *OutputLogEvent) SetMessage(v string) *OutputLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *OutputLogEvent) SetTimestamp(v int64) *OutputLogEvent { + s.Timestamp = &v + return s +} + +// A tructures that contains information about one pattern token related to +// an anomaly. +// +// For more information about patterns and tokens, see CreateLogAnomalyDetector +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogAnomalyDetector.html). +type PatternToken struct { + _ struct{} `type:"structure"` + + // For a dynamic token, this indicates where in the pattern that this token + // appears, related to other dynamic tokens. The dynamic token that appears + // first has a value of 1, the one that appears second is 2, and so on. 
+ DynamicTokenPosition *int64 `locationName:"dynamicTokenPosition" type:"integer"` + + // Contains the values found for a dynamic token, and the number of times each + // value was found. + Enumerations map[string]*int64 `locationName:"enumerations" type:"map"` + + // Specifies whether this is a dynamic token. + IsDynamic *bool `locationName:"isDynamic" type:"boolean"` + + // The string represented by this token. If this is a dynamic token, the value + // will be <*> + TokenString *string `locationName:"tokenString" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PatternToken) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PatternToken) GoString() string { + return s.String() +} + +// SetDynamicTokenPosition sets the DynamicTokenPosition field's value. +func (s *PatternToken) SetDynamicTokenPosition(v int64) *PatternToken { + s.DynamicTokenPosition = &v + return s +} + +// SetEnumerations sets the Enumerations field's value. +func (s *PatternToken) SetEnumerations(v map[string]*int64) *PatternToken { + s.Enumerations = v + return s +} + +// SetIsDynamic sets the IsDynamic field's value. +func (s *PatternToken) SetIsDynamic(v bool) *PatternToken { + s.IsDynamic = &v + return s +} + +// SetTokenString sets the TokenString field's value. +func (s *PatternToken) SetTokenString(v string) *PatternToken { + s.TokenString = &v + return s +} + +// A structure that contains information about one delivery destination policy. 
+type Policy struct { + _ struct{} `type:"structure"` + + // The contents of the delivery destination policy. + DeliveryDestinationPolicy *string `locationName:"deliveryDestinationPolicy" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Policy) GoString() string { + return s.String() +} + +// SetDeliveryDestinationPolicy sets the DeliveryDestinationPolicy field's value. +func (s *Policy) SetDeliveryDestinationPolicy(v string) *Policy { + s.DeliveryDestinationPolicy = &v + return s +} + +type PutAccountPolicyInput struct { + _ struct{} `type:"structure"` + + // Specify the policy, in JSON. + // + // Data protection policy + // + // A data protection policy must include two JSON blocks: + // + // * The first block must include both a DataIdentifer array and an Operation + // property with an Audit action. The DataIdentifer array lists the types + // of sensitive data that you want to mask. For more information about the + // available options, see Types of data that you can mask (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html). + // The Operation property with an Audit action is required to find the sensitive + // data terms. This Audit action must contain a FindingsDestination object. + // You can optionally use that FindingsDestination object to list one or + // more destinations to send audit findings to. 
If you specify destinations
+	// such as log groups, Firehose streams, and S3 buckets, they must already
+	// exist.
+	//
+	// * The second block must include both a DataIdentifer array and an Operation
+	// property with a Deidentify action. The DataIdentifer array must exactly
+	// match the DataIdentifer array in the first block of the policy. The Operation
+	// property with the Deidentify action is what actually masks the data, and
+	// it must contain the "MaskConfig": {} object. The "MaskConfig": {} object
+	// must be empty.
+	//
+	// For an example data protection policy, see the Examples section on this page.
+	//
+	// The contents of the two DataIdentifer arrays must match exactly.
+	//
+	// In addition to the two JSON blocks, the policyDocument can also include Name,
+	// Description, and Version fields. The Name is different than the operation's
+	// policyName parameter, and is used as a dimension when CloudWatch Logs reports
+	// audit findings metrics to CloudWatch.
+	//
+	// The JSON specified in policyDocument can be up to 30,720 characters long.
+	//
+	// Subscription filter policy
+	//
+	// A subscription filter policy can include the following attributes in a JSON
+	// block:
+	//
+	// * DestinationArn The ARN of the destination to deliver log events to.
+	// Supported destinations are: A Kinesis Data Streams data stream in the
+	// same account as the subscription policy, for same-account delivery. A
+	// Firehose data stream in the same account as the subscription policy, for
+	// same-account delivery. A Lambda function in the same account as the subscription
+	// policy, for same-account delivery. A logical destination in a different
+	// account created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html),
+	// for cross-account delivery. Kinesis Data Streams and Firehose are supported
+	// as logical destinations. 
+	//
+	// * RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions
+	// to deliver ingested log events to the destination stream. You don't need
+	// to provide the ARN when you are working with a logical destination for
+	// cross-account delivery.
+	//
+	// * FilterPattern A filter pattern for subscribing to a filtered stream
+	// of log events.
+	//
+	// * Distribution The method used to distribute log data to the destination.
+	// By default, log data is grouped by log stream, but the grouping can be
+	// set to Random for a more even distribution. This property is only applicable
+	// when the destination is a Kinesis Data Streams data stream.
+	//
+	// PolicyDocument is a required field
+	PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"`
+
+	// A name for the policy. This must be unique within the account.
+	//
+	// PolicyName is a required field
+	PolicyName *string `locationName:"policyName" type:"string" required:"true"`
+
+	// The type of policy that you're creating or updating.
+	//
+	// PolicyType is a required field
+	PolicyType *string `locationName:"policyType" type:"string" required:"true" enum:"PolicyType"`
+
+	// Currently the only valid value for this parameter is ALL, which specifies
+	// that the data protection policy applies to all log groups in the account.
+	// If you omit this parameter, the default of ALL is used.
+	Scope *string `locationName:"scope" type:"string" enum:"Scope"`
+
+	// Use this parameter to apply the subscription filter policy to a subset of
+	// log groups in the account. Currently, the only supported filter is LogGroupName
+	// NOT IN []. The selectionCriteria string can be up to 25KB in length. The
+	// length is determined by using its UTF-8 bytes.
+	//
+	// Using the selectionCriteria parameter is useful to help prevent infinite
+	// loops. 
For more information, see Log recursion prevention (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html).
+	//
+	// Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY
+	// for policyType.
+	SelectionCriteria *string `locationName:"selectionCriteria" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAccountPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAccountPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutAccountPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutAccountPolicyInput"}
+	if s.PolicyDocument == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyDocument"))
+	}
+	if s.PolicyName == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+	}
+	if s.PolicyType == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *PutAccountPolicyInput) SetPolicyDocument(v string) *PutAccountPolicyInput {
+	s.PolicyDocument = &v
+	return s
+}
+
+// SetPolicyName sets the PolicyName field's value. 
+func (s *PutAccountPolicyInput) SetPolicyName(v string) *PutAccountPolicyInput { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *PutAccountPolicyInput) SetPolicyType(v string) *PutAccountPolicyInput { + s.PolicyType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *PutAccountPolicyInput) SetScope(v string) *PutAccountPolicyInput { + s.Scope = &v + return s +} + +// SetSelectionCriteria sets the SelectionCriteria field's value. +func (s *PutAccountPolicyInput) SetSelectionCriteria(v string) *PutAccountPolicyInput { + s.SelectionCriteria = &v + return s +} + +type PutAccountPolicyOutput struct { + _ struct{} `type:"structure"` + + // The account policy that you created. + AccountPolicy *AccountPolicy `locationName:"accountPolicy" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutAccountPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutAccountPolicyOutput) GoString() string { + return s.String() +} + +// SetAccountPolicy sets the AccountPolicy field's value. +func (s *PutAccountPolicyOutput) SetAccountPolicy(v *AccountPolicy) *PutAccountPolicyOutput { + s.AccountPolicy = v + return s +} + +type PutDataProtectionPolicyInput struct { + _ struct{} `type:"structure"` + + // Specify either the log group name or log group ARN. 
+	//
+	// LogGroupIdentifier is a required field
+	LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string" required:"true"`
+
+	// Specify the data protection policy, in JSON.
+	//
+	// This policy must include two JSON blocks:
+	//
+	// * The first block must include both a DataIdentifer array and an Operation
+	// property with an Audit action. The DataIdentifer array lists the types
+	// of sensitive data that you want to mask. For more information about the
+	// available options, see Types of data that you can mask (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html).
+	// The Operation property with an Audit action is required to find the sensitive
+	// data terms. This Audit action must contain a FindingsDestination object.
+	// You can optionally use that FindingsDestination object to list one or
+	// more destinations to send audit findings to. If you specify destinations
+	// such as log groups, Firehose streams, and S3 buckets, they must already
+	// exist.
+	//
+	// * The second block must include both a DataIdentifer array and an Operation
+	// property with a Deidentify action. The DataIdentifer array must exactly
+	// match the DataIdentifer array in the first block of the policy. The Operation
+	// property with the Deidentify action is what actually masks the data, and
+	// it must contain the "MaskConfig": {} object. The "MaskConfig": {} object
+	// must be empty.
+	//
+	// For an example data protection policy, see the Examples section on this page.
+	//
+	// The contents of the two DataIdentifer arrays must match exactly.
+	//
+	// In addition to the two JSON blocks, the policyDocument can also include Name,
+	// Description, and Version fields. The Name is used as a dimension when CloudWatch
+	// Logs reports audit findings metrics to CloudWatch.
+	//
+	// The JSON specified in policyDocument can be up to 30,720 characters. 
+ // + // PolicyDocument is a required field + PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDataProtectionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDataProtectionPolicyInput"} + if s.LogGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupIdentifier")) + } + if s.LogGroupIdentifier != nil && len(*s.LogGroupIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifier", 1)) + } + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *PutDataProtectionPolicyInput) SetLogGroupIdentifier(v string) *PutDataProtectionPolicyInput { + s.LogGroupIdentifier = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. 
+func (s *PutDataProtectionPolicyInput) SetPolicyDocument(v string) *PutDataProtectionPolicyInput { + s.PolicyDocument = &v + return s +} + +type PutDataProtectionPolicyOutput struct { + _ struct{} `type:"structure"` + + // The date and time that this policy was most recently updated. + LastUpdatedTime *int64 `locationName:"lastUpdatedTime" type:"long"` + + // The log group name or ARN that you specified in your request. + LogGroupIdentifier *string `locationName:"logGroupIdentifier" min:"1" type:"string"` + + // The data protection policy used for this log group. + PolicyDocument *string `locationName:"policyDocument" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyOutput) GoString() string { + return s.String() +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *PutDataProtectionPolicyOutput) SetLastUpdatedTime(v int64) *PutDataProtectionPolicyOutput { + s.LastUpdatedTime = &v + return s +} + +// SetLogGroupIdentifier sets the LogGroupIdentifier field's value. +func (s *PutDataProtectionPolicyOutput) SetLogGroupIdentifier(v string) *PutDataProtectionPolicyOutput { + s.LogGroupIdentifier = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. 
+func (s *PutDataProtectionPolicyOutput) SetPolicyDocument(v string) *PutDataProtectionPolicyOutput { + s.PolicyDocument = &v + return s +} + +type PutDeliveryDestinationInput struct { + _ struct{} `type:"structure"` + + // A structure that contains the ARN of the Amazon Web Services resource that + // will receive the logs. + // + // DeliveryDestinationConfiguration is a required field + DeliveryDestinationConfiguration *DeliveryDestinationConfiguration `locationName:"deliveryDestinationConfiguration" type:"structure" required:"true"` + + // A name for this delivery destination. This name must be unique for all delivery + // destinations in your account. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The format for the logs that this delivery destination will receive. + OutputFormat *string `locationName:"outputFormat" type:"string" enum:"OutputFormat"` + + // An optional list of key-value pairs to associate with the resource. + // + // For more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDeliveryDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDeliveryDestinationInput"} + if s.DeliveryDestinationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationConfiguration")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.DeliveryDestinationConfiguration != nil { + if err := s.DeliveryDestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("DeliveryDestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryDestinationConfiguration sets the DeliveryDestinationConfiguration field's value. +func (s *PutDeliveryDestinationInput) SetDeliveryDestinationConfiguration(v *DeliveryDestinationConfiguration) *PutDeliveryDestinationInput { + s.DeliveryDestinationConfiguration = v + return s +} + +// SetName sets the Name field's value. +func (s *PutDeliveryDestinationInput) SetName(v string) *PutDeliveryDestinationInput { + s.Name = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *PutDeliveryDestinationInput) SetOutputFormat(v string) *PutDeliveryDestinationInput { + s.OutputFormat = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutDeliveryDestinationInput) SetTags(v map[string]*string) *PutDeliveryDestinationInput { + s.Tags = v + return s +} + +type PutDeliveryDestinationOutput struct { + _ struct{} `type:"structure"` + + // A structure containing information about the delivery destination that you + // just created or updated. 
+ DeliveryDestination *DeliveryDestination `locationName:"deliveryDestination" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationOutput) GoString() string { + return s.String() +} + +// SetDeliveryDestination sets the DeliveryDestination field's value. +func (s *PutDeliveryDestinationOutput) SetDeliveryDestination(v *DeliveryDestination) *PutDeliveryDestinationOutput { + s.DeliveryDestination = v + return s +} + +type PutDeliveryDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery destination to assign this policy to. + // + // DeliveryDestinationName is a required field + DeliveryDestinationName *string `locationName:"deliveryDestinationName" min:"1" type:"string" required:"true"` + + // The contents of the policy. + // + // DeliveryDestinationPolicy is a required field + DeliveryDestinationPolicy *string `locationName:"deliveryDestinationPolicy" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDeliveryDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDeliveryDestinationPolicyInput"} + if s.DeliveryDestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationName")) + } + if s.DeliveryDestinationName != nil && len(*s.DeliveryDestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryDestinationName", 1)) + } + if s.DeliveryDestinationPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryDestinationPolicy")) + } + if s.DeliveryDestinationPolicy != nil && len(*s.DeliveryDestinationPolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryDestinationPolicy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryDestinationName sets the DeliveryDestinationName field's value. +func (s *PutDeliveryDestinationPolicyInput) SetDeliveryDestinationName(v string) *PutDeliveryDestinationPolicyInput { + s.DeliveryDestinationName = &v + return s +} + +// SetDeliveryDestinationPolicy sets the DeliveryDestinationPolicy field's value. +func (s *PutDeliveryDestinationPolicyInput) SetDeliveryDestinationPolicy(v string) *PutDeliveryDestinationPolicyInput { + s.DeliveryDestinationPolicy = &v + return s +} + +type PutDeliveryDestinationPolicyOutput struct { + _ struct{} `type:"structure"` + + // The contents of the policy that you just created. + Policy *Policy `locationName:"policy" type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliveryDestinationPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *PutDeliveryDestinationPolicyOutput) SetPolicy(v *Policy) *PutDeliveryDestinationPolicyOutput { + s.Policy = v + return s +} + +type PutDeliverySourceInput struct { + _ struct{} `type:"structure"` + + // Defines the type of log that the source is sending. + // + // * For Amazon Bedrock, the valid value is APPLICATION_LOGS. + // + // * For Amazon CodeWhisperer, the valid value is EVENT_LOGS. + // + // * For IAM Identity Center, the valid value is ERROR_LOGS. + // + // * For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, + // WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. + // + // LogType is a required field + LogType *string `locationName:"logType" min:"1" type:"string" required:"true"` + + // A name for this delivery source. This name must be unique for all delivery + // sources in your account. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The ARN of the Amazon Web Services resource that is generating and sending + // logs. 
For example, arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234 + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // An optional list of key-value pairs to associate with the resource. + // + // For more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliverySourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliverySourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDeliverySourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDeliverySourceInput"} + if s.LogType == nil { + invalidParams.Add(request.NewErrParamRequired("LogType")) + } + if s.LogType != nil && len(*s.LogType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogType", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogType sets the LogType field's value. +func (s *PutDeliverySourceInput) SetLogType(v string) *PutDeliverySourceInput { + s.LogType = &v + return s +} + +// SetName sets the Name field's value. +func (s *PutDeliverySourceInput) SetName(v string) *PutDeliverySourceInput { + s.Name = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *PutDeliverySourceInput) SetResourceArn(v string) *PutDeliverySourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutDeliverySourceInput) SetTags(v map[string]*string) *PutDeliverySourceInput { + s.Tags = v + return s +} + +type PutDeliverySourceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing information about the delivery source that was just + // created or updated. + DeliverySource *DeliverySource `locationName:"deliverySource" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutDeliverySourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDeliverySourceOutput) GoString() string { + return s.String() +} + +// SetDeliverySource sets the DeliverySource field's value. +func (s *PutDeliverySourceOutput) SetDeliverySource(v *DeliverySource) *PutDeliverySourceOutput { + s.DeliverySource = v + return s +} + +type PutDestinationInput struct { + _ struct{} `type:"structure"` + + // A name for the destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to call the + // Amazon Kinesis PutRecord operation on the destination stream. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` + + // An optional list of key-value pairs to associate with the resource. + // + // For more information about tagging, see Tagging Amazon Web Services resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + + // The ARN of an Amazon Kinesis stream to which to deliver matching log events. + // + // TargetArn is a required field + TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.TargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetArn")) + } + if s.TargetArn != nil && len(*s.TargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationName sets the DestinationName field's value. +func (s *PutDestinationInput) SetDestinationName(v string) *PutDestinationInput { + s.DestinationName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *PutDestinationInput) SetRoleArn(v string) *PutDestinationInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutDestinationInput) SetTags(v map[string]*string) *PutDestinationInput { + s.Tags = v + return s +} + +// SetTargetArn sets the TargetArn field's value. 
+func (s *PutDestinationInput) SetTargetArn(v string) *PutDestinationInput { + s.TargetArn = &v + return s +} + +type PutDestinationOutput struct { + _ struct{} `type:"structure"` + + // The destination. + Destination *Destination `locationName:"destination" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationOutput) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *PutDestinationOutput) SetDestination(v *Destination) *PutDestinationOutput { + s.Destination = v + return s +} + +type PutDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // An IAM policy document that authorizes cross-account users to deliver their + // log events to the associated destination. This can be up to 5120 bytes. + // + // AccessPolicy is a required field + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` + + // A name for an existing destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` + + // Specify true if you are updating an existing destination policy to grant + // permission to an organization ID instead of granting permission to individual + // Amazon Web Services accounts. 
Before you update a destination policy this + // way, you must first update the subscription filters in the accounts that + // send logs to this destination. If you do not, the subscription filters might + // stop working. By specifying true for forceUpdate, you are affirming that + // you have already updated the subscription filters. For more information, + // see Updating an existing cross-account subscription (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Cross-Account-Log_Subscription-Update.html) + // + // If you omit this parameter, the default of false is used. + ForceUpdate *bool `locationName:"forceUpdate" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"} + if s.AccessPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("AccessPolicy")) + } + if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1)) + } + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPolicy sets the AccessPolicy field's value. +func (s *PutDestinationPolicyInput) SetAccessPolicy(v string) *PutDestinationPolicyInput { + s.AccessPolicy = &v + return s +} + +// SetDestinationName sets the DestinationName field's value. +func (s *PutDestinationPolicyInput) SetDestinationName(v string) *PutDestinationPolicyInput { + s.DestinationName = &v + return s +} + +// SetForceUpdate sets the ForceUpdate field's value. +func (s *PutDestinationPolicyInput) SetForceUpdate(v bool) *PutDestinationPolicyInput { + s.ForceUpdate = &v + return s +} + +type PutDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutDestinationPolicyOutput) GoString() string { + return s.String() +} + +type PutLogEventsInput struct { + _ struct{} `type:"structure"` + + Entity *Entity `locationName:"entity" type:"structure"` + + // The log events. + // + // LogEvents is a required field + LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // The sequence token obtained from the response of the previous PutLogEvents + // call. + // + // The sequenceToken parameter is now ignored in PutLogEvents actions. PutLogEvents + // actions are now accepted and never return InvalidSequenceTokenException or + // DataAlreadyAcceptedException even if the sequence token is not valid. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} + if s.LogEvents == nil { + invalidParams.Add(request.NewErrParamRequired("LogEvents")) + } + if s.LogEvents != nil && len(s.LogEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) + } + if s.Entity != nil { + if err := s.Entity.Validate(); err != nil { + invalidParams.AddNested("Entity", err.(request.ErrInvalidParams)) + } + } + if s.LogEvents != nil { + for i, v := range s.LogEvents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntity sets the Entity field's value. +func (s *PutLogEventsInput) SetEntity(v *Entity) *PutLogEventsInput { + s.Entity = v + return s +} + +// SetLogEvents sets the LogEvents field's value. +func (s *PutLogEventsInput) SetLogEvents(v []*InputLogEvent) *PutLogEventsInput { + s.LogEvents = v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutLogEventsInput) SetLogGroupName(v string) *PutLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. 
+func (s *PutLogEventsInput) SetLogStreamName(v string) *PutLogEventsInput { + s.LogStreamName = &v + return s +} + +// SetSequenceToken sets the SequenceToken field's value. +func (s *PutLogEventsInput) SetSequenceToken(v string) *PutLogEventsInput { + s.SequenceToken = &v + return s +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The next sequence token. + // + // This field has been deprecated. + // + // The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions + // are always accepted even if the sequence token is not valid. You can use + // parallel PutLogEvents actions on the same log stream and you do not need + // to wait for the response of a previous PutLogEvents action to obtain the + // nextSequenceToken value. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + RejectedEntityInfo *RejectedEntityInfo `locationName:"rejectedEntityInfo" type:"structure"` + + // The rejected events. + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +// SetNextSequenceToken sets the NextSequenceToken field's value. 
+func (s *PutLogEventsOutput) SetNextSequenceToken(v string) *PutLogEventsOutput { + s.NextSequenceToken = &v + return s +} + +// SetRejectedEntityInfo sets the RejectedEntityInfo field's value. +func (s *PutLogEventsOutput) SetRejectedEntityInfo(v *RejectedEntityInfo) *PutLogEventsOutput { + s.RejectedEntityInfo = v + return s +} + +// SetRejectedLogEventsInfo sets the RejectedLogEventsInfo field's value. +func (s *PutLogEventsOutput) SetRejectedLogEventsInfo(v *RejectedLogEventsInfo) *PutLogEventsOutput { + s.RejectedLogEventsInfo = v + return s +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A filter pattern for extracting metric data out of ingested log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information that defines how metric data gets emitted. + // + // MetricTransformations is a required field + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MetricTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) + } + if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) + } + if s.MetricTransformations != nil { + for i, v := range s.MetricTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. +func (s *PutMetricFilterInput) SetFilterName(v string) *PutMetricFilterInput { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *PutMetricFilterInput) SetFilterPattern(v string) *PutMetricFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *PutMetricFilterInput) SetLogGroupName(v string) *PutMetricFilterInput { + s.LogGroupName = &v + return s +} + +// SetMetricTransformations sets the MetricTransformations field's value. +func (s *PutMetricFilterInput) SetMetricTransformations(v []*MetricTransformation) *PutMetricFilterInput { + s.MetricTransformations = v + return s +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // Used as an idempotency token, to avoid returning an exception if the service + // receives the same request twice because of a network error. + ClientToken *string `locationName:"clientToken" min:"36" type:"string" idempotencyToken:"true"` + + // Use this parameter to include specific log groups as part of your query definition. + // + // If you are updating a query definition and you omit this parameter, then + // the updated definition will contain no log groups. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // A name for the query definition. If you are saving numerous query definitions, + // we recommend that you name them. 
This way, you can find the ones you want
+	// by using the first part of the name as a filter in the queryDefinitionNamePrefix
+	// parameter of DescribeQueryDefinitions (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html).
+	//
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// If you are updating a query definition, use this parameter to specify the
+	// ID of the query definition that you want to update. You can use DescribeQueryDefinitions
+	// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html)
+	// to retrieve the IDs of your saved query definitions.
+	//
+	// If you are creating a query definition, do not specify this parameter. CloudWatch
+	// generates a unique ID for the new query definition and includes it in the
+	// response to this operation.
+	QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"`
+
+	// The query string to use for this definition. For more information, see CloudWatch
+	// Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
+	//
+	// QueryString is a required field
+	QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutQueryDefinitionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutQueryDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutQueryDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutQueryDefinitionInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 36)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *PutQueryDefinitionInput) SetClientToken(v string) *PutQueryDefinitionInput { + s.ClientToken = &v + return s +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *PutQueryDefinitionInput) SetLogGroupNames(v []*string) *PutQueryDefinitionInput { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *PutQueryDefinitionInput) SetName(v string) *PutQueryDefinitionInput { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *PutQueryDefinitionInput) SetQueryDefinitionId(v string) *PutQueryDefinitionInput { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *PutQueryDefinitionInput) SetQueryString(v string) *PutQueryDefinitionInput { + s.QueryString = &v + return s +} + +type PutQueryDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition. 
+ QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutQueryDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutQueryDefinitionOutput) GoString() string { + return s.String() +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *PutQueryDefinitionOutput) SetQueryDefinitionId(v string) *PutQueryDefinitionOutput { + s.QueryDefinitionId = &v + return s +} + +type PutResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // Details of the new policy, including the identity of the principal that is + // enabled to put logs to this account. This is formatted as a JSON string. + // This parameter is required. + // + // The following example creates a resource policy enabling the Route 53 service + // to put DNS query logs in to the specified log group. Replace "logArn" with + // the ARN of your CloudWatch Logs resource, such as a log group or log stream. + // + // CloudWatch Logs also supports aws:SourceArn (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourcearn) + // and aws:SourceAccount (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceaccount) + // condition context keys. + // + // In the example resource policy, you would replace the value of SourceArn + // with the resource making the call from Route 53 to CloudWatch Logs. 
You would + // also replace the value of SourceAccount with the Amazon Web Services account + // ID making that call. + // + // { "Version": "2012-10-17", "Statement": [ { "Sid": "Route53LogsToCloudWatchLogs", + // "Effect": "Allow", "Principal": { "Service": [ "route53.amazonaws.com" ] + // }, "Action": "logs:PutLogEvents", "Resource": "logArn", "Condition": { "ArnLike": + // { "aws:SourceArn": "myRoute53ResourceArn" }, "StringEquals": { "aws:SourceAccount": + // "myAwsAccountId" } } } ] } + PolicyDocument *string `locationName:"policyDocument" min:"1" type:"string"` + + // Name of the new policy. This parameter is required. + PolicyName *string `locationName:"policyName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyDocument sets the PolicyDocument field's value. 
+func (s *PutResourcePolicyInput) SetPolicyDocument(v string) *PutResourcePolicyInput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PutResourcePolicyInput) SetPolicyName(v string) *PutResourcePolicyInput { + s.PolicyName = &v + return s +} + +type PutResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // The new policy. + ResourcePolicy *ResourcePolicy `locationName:"resourcePolicy" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *PutResourcePolicyOutput) SetResourcePolicy(v *ResourcePolicy) *PutResourcePolicyOutput { + s.ResourcePolicy = v + return s +} + +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The number of days to retain the log events in the specified log group. Possible + // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, + // 1096, 1827, 2192, 2557, 2922, 3288, and 3653. 
+ // + // To set a log group so that its log events do not expire, use DeleteRetentionPolicy + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html). + // + // RetentionInDays is a required field + RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RetentionInDays == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutRetentionPolicyInput) SetLogGroupName(v string) *PutRetentionPolicyInput { + s.LogGroupName = &v + return s +} + +// SetRetentionInDays sets the RetentionInDays field's value. 
+func (s *PutRetentionPolicyInput) SetRetentionInDays(v int64) *PutRetentionPolicyInput { + s.RetentionInDays = &v + return s +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the destination to deliver matching log events to. Currently, + // the supported destinations are: + // + // * An Amazon Kinesis stream belonging to the same account as the subscription + // filter, for same-account delivery. + // + // * A logical destination (specified using an ARN) belonging to a different + // account, for cross-account delivery. If you're setting up a cross-account + // subscription, the destination must have an IAM policy associated with + // it. The IAM policy must allow the sender to send logs to the destination. + // For more information, see PutDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html). + // + // * A Kinesis Data Firehose delivery stream belonging to the same account + // as the subscription filter, for same-account delivery. + // + // * A Lambda function belonging to the same account as the subscription + // filter, for same-account delivery. 
+ // + // DestinationArn is a required field + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` + + // The method used to distribute log data to the destination. By default, log + // data is grouped by log stream, but the grouping can be set to random for + // a more even distribution. This property is only applicable when the destination + // is an Amazon Kinesis data stream. + Distribution *string `locationName:"distribution" type:"string" enum:"Distribution"` + + // A name for the subscription filter. If you are updating an existing filter, + // you must specify the correct name in filterName. To find the name of the + // filter currently associated with a log group, use DescribeSubscriptionFilters + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeSubscriptionFilters.html). + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A filter pattern for subscribing to a filtered stream of log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver + // ingested log events to the destination stream. You don't need to provide + // the ARN when you are working with a logical destination for cross-account + // delivery. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} + if s.DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationArn")) + } + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *PutSubscriptionFilterInput) SetDestinationArn(v string) *PutSubscriptionFilterInput { + s.DestinationArn = &v + return s +} + +// SetDistribution sets the Distribution field's value. 
+func (s *PutSubscriptionFilterInput) SetDistribution(v string) *PutSubscriptionFilterInput { + s.Distribution = &v + return s +} + +// SetFilterName sets the FilterName field's value. +func (s *PutSubscriptionFilterInput) SetFilterName(v string) *PutSubscriptionFilterInput { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *PutSubscriptionFilterInput) SetFilterPattern(v string) *PutSubscriptionFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutSubscriptionFilterInput) SetLogGroupName(v string) *PutSubscriptionFilterInput { + s.LogGroupName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *PutSubscriptionFilterInput) SetRoleArn(v string) *PutSubscriptionFilterInput { + s.RoleArn = &v + return s +} + +type PutSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutSubscriptionFilterOutput) GoString() string { + return s.String() +} + +// Reserved. +type QueryCompileError struct { + _ struct{} `type:"structure"` + + // Reserved. + Location *QueryCompileErrorLocation `locationName:"location" type:"structure"` + + // Reserved. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryCompileError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryCompileError) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *QueryCompileError) SetLocation(v *QueryCompileErrorLocation) *QueryCompileError { + s.Location = v + return s +} + +// SetMessage sets the Message field's value. +func (s *QueryCompileError) SetMessage(v string) *QueryCompileError { + s.Message = &v + return s +} + +// Reserved. +type QueryCompileErrorLocation struct { + _ struct{} `type:"structure"` + + // Reserved. + EndCharOffset *int64 `locationName:"endCharOffset" type:"integer"` + + // Reserved. + StartCharOffset *int64 `locationName:"startCharOffset" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryCompileErrorLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryCompileErrorLocation) GoString() string { + return s.String() +} + +// SetEndCharOffset sets the EndCharOffset field's value. 
+func (s *QueryCompileErrorLocation) SetEndCharOffset(v int64) *QueryCompileErrorLocation { + s.EndCharOffset = &v + return s +} + +// SetStartCharOffset sets the StartCharOffset field's value. +func (s *QueryCompileErrorLocation) SetStartCharOffset(v int64) *QueryCompileErrorLocation { + s.StartCharOffset = &v + return s +} + +// This structure contains details about a saved CloudWatch Logs Insights query +// definition. +type QueryDefinition struct { + _ struct{} `type:"structure"` + + // The date that the query definition was most recently modified. + LastModified *int64 `locationName:"lastModified" type:"long"` + + // If this query definition contains a list of log groups that it is limited + // to, that list appears here. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // The name of the query definition. + Name *string `locationName:"name" min:"1" type:"string"` + + // The unique ID of the query definition. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` + + // The query string to use for this definition. For more information, see CloudWatch + // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + QueryString *string `locationName:"queryString" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s QueryDefinition) GoString() string { + return s.String() +} + +// SetLastModified sets the LastModified field's value. +func (s *QueryDefinition) SetLastModified(v int64) *QueryDefinition { + s.LastModified = &v + return s +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *QueryDefinition) SetLogGroupNames(v []*string) *QueryDefinition { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *QueryDefinition) SetName(v string) *QueryDefinition { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *QueryDefinition) SetQueryDefinitionId(v string) *QueryDefinition { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *QueryDefinition) SetQueryString(v string) *QueryDefinition { + s.QueryString = &v + return s +} + +// Information about one CloudWatch Logs Insights query that matches the request +// in a DescribeQueries operation. +type QueryInfo struct { + _ struct{} `type:"structure"` + + // The date and time that this query was created. + CreateTime *int64 `locationName:"createTime" type:"long"` + + // The name of the log group scanned by this query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The unique ID number of this query. + QueryId *string `locationName:"queryId" type:"string"` + + // The query string used in this query. + QueryString *string `locationName:"queryString" type:"string"` + + // The status of this query. Possible values are Cancelled, Complete, Failed, + // Running, Scheduled, and Unknown. + Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s QueryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryInfo) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *QueryInfo) SetCreateTime(v int64) *QueryInfo { + s.CreateTime = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *QueryInfo) SetLogGroupName(v string) *QueryInfo { + s.LogGroupName = &v + return s +} + +// SetQueryId sets the QueryId field's value. +func (s *QueryInfo) SetQueryId(v string) *QueryInfo { + s.QueryId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *QueryInfo) SetQueryString(v string) *QueryInfo { + s.QueryString = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *QueryInfo) SetStatus(v string) *QueryInfo { + s.Status = &v + return s +} + +// Contains the number of log events scanned by the query, the number of log +// events that matched the query criteria, and the total number of bytes in +// the log events that were scanned. +type QueryStatistics struct { + _ struct{} `type:"structure"` + + // The total number of bytes in the log events scanned during the query. + BytesScanned *float64 `locationName:"bytesScanned" type:"double"` + + // The number of log events that matched the query string. + RecordsMatched *float64 `locationName:"recordsMatched" type:"double"` + + // The total number of log events scanned during the query. + RecordsScanned *float64 `locationName:"recordsScanned" type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStatistics) GoString() string { + return s.String() +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *QueryStatistics) SetBytesScanned(v float64) *QueryStatistics { + s.BytesScanned = &v + return s +} + +// SetRecordsMatched sets the RecordsMatched field's value. +func (s *QueryStatistics) SetRecordsMatched(v float64) *QueryStatistics { + s.RecordsMatched = &v + return s +} + +// SetRecordsScanned sets the RecordsScanned field's value. +func (s *QueryStatistics) SetRecordsScanned(v float64) *QueryStatistics { + s.RecordsScanned = &v + return s +} + +type RejectedEntityInfo struct { + _ struct{} `type:"structure"` + + ErrorType *string `locationName:"errorType" type:"string" enum:"EntityRejectionErrorType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RejectedEntityInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RejectedEntityInfo) GoString() string { + return s.String() +} + +// SetErrorType sets the ErrorType field's value. 
+func (s *RejectedEntityInfo) SetErrorType(v string) *RejectedEntityInfo { + s.ErrorType = &v + return s +} + +// Represents the rejected events. +type RejectedLogEventsInfo struct { + _ struct{} `type:"structure"` + + // The expired log events. + ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` + + // The index of the first log event that is too new. This field is inclusive. + TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` + + // The index of the last log event that is too old. This field is exclusive. + TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RejectedLogEventsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RejectedLogEventsInfo) GoString() string { + return s.String() +} + +// SetExpiredLogEventEndIndex sets the ExpiredLogEventEndIndex field's value. +func (s *RejectedLogEventsInfo) SetExpiredLogEventEndIndex(v int64) *RejectedLogEventsInfo { + s.ExpiredLogEventEndIndex = &v + return s +} + +// SetTooNewLogEventStartIndex sets the TooNewLogEventStartIndex field's value. +func (s *RejectedLogEventsInfo) SetTooNewLogEventStartIndex(v int64) *RejectedLogEventsInfo { + s.TooNewLogEventStartIndex = &v + return s +} + +// SetTooOldLogEventEndIndex sets the TooOldLogEventEndIndex field's value. 
+func (s *RejectedLogEventsInfo) SetTooOldLogEventEndIndex(v int64) *RejectedLogEventsInfo { + s.TooOldLogEventEndIndex = &v + return s +} + +// The specified resource already exists. +type ResourceAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ResourceAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceAlreadyExistsException) Code() string { + return "ResourceAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *ResourceAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ResourceAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified resource does not exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A policy enabling one or more entities to put logs to a log group in this +// account. +type ResourcePolicy struct { + _ struct{} `type:"structure"` + + // Timestamp showing when this policy was last updated, expressed as the number + // of milliseconds after Jan 1, 1970 00:00:00 UTC. + LastUpdatedTime *int64 `locationName:"lastUpdatedTime" type:"long"` + + // The details of the policy. + PolicyDocument *string `locationName:"policyDocument" min:"1" type:"string"` + + // The name of the resource policy. + PolicyName *string `locationName:"policyName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourcePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourcePolicy) GoString() string { + return s.String() +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *ResourcePolicy) SetLastUpdatedTime(v int64) *ResourcePolicy { + s.LastUpdatedTime = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *ResourcePolicy) SetPolicyDocument(v string) *ResourcePolicy { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. 
+func (s *ResourcePolicy) SetPolicyName(v string) *ResourcePolicy { + s.PolicyName = &v + return s +} + +// Contains one field from one log event returned by a CloudWatch Logs Insights +// query, along with the value of that field. +// +// For more information about the fields that are generated by CloudWatch logs, +// see Supported Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html). +type ResultField struct { + _ struct{} `type:"structure"` + + // The log event field. + Field *string `locationName:"field" type:"string"` + + // The value of this field. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResultField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResultField) GoString() string { + return s.String() +} + +// SetField sets the Field field's value. +func (s *ResultField) SetField(v string) *ResultField { + s.Field = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ResultField) SetValue(v string) *ResultField { + s.Value = &v + return s +} + +// Represents the search status of a log stream. +type SearchedLogStream struct { + _ struct{} `type:"structure"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // Indicates whether all the events in this log stream were searched. 
+ SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SearchedLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SearchedLogStream) GoString() string { + return s.String() +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *SearchedLogStream) SetLogStreamName(v string) *SearchedLogStream { + s.LogStreamName = &v + return s +} + +// SetSearchedCompletely sets the SearchedCompletely field's value. +func (s *SearchedLogStream) SetSearchedCompletely(v bool) *SearchedLogStream { + s.SearchedCompletely = &v + return s +} + +// This request exceeds a service quota. +type ServiceQuotaExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServiceQuotaExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ServiceQuotaExceededException) GoString() string { + return s.String() +} + +func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error { + return &ServiceQuotaExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServiceQuotaExceededException) Code() string { + return "ServiceQuotaExceededException" +} + +// Message returns the exception's message. +func (s *ServiceQuotaExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceQuotaExceededException) OrigErr() error { + return nil +} + +func (s *ServiceQuotaExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The service cannot complete the request. +type ServiceUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServiceUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ServiceUnavailableException) GoString() string {
+ return s.String()
+}
+
+func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error {
+ return &ServiceUnavailableException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ServiceUnavailableException) Code() string {
+ return "ServiceUnavailableException"
+}
+
+// Message returns the exception's message.
+func (s *ServiceUnavailableException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ServiceUnavailableException) OrigErr() error {
+ return nil
+}
+
+func (s *ServiceUnavailableException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ServiceUnavailableException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ServiceUnavailableException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// This exception is returned if an unknown error occurs during a Live Tail session.
+type SessionStreamingException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionStreamingException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionStreamingException) GoString() string {
+ return s.String()
+}
+
+// The SessionStreamingException is an event in the StartLiveTailResponseStream group of events.
+func (s *SessionStreamingException) eventStartLiveTailResponseStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SessionStreamingException value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *SessionStreamingException) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *SessionStreamingException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+func newErrorSessionStreamingException(v protocol.ResponseMetadata) error {
+ return &SessionStreamingException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *SessionStreamingException) Code() string {
+ return "SessionStreamingException"
+}
+
+// Message returns the exception's message.
+func (s *SessionStreamingException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *SessionStreamingException) OrigErr() error {
+ return nil
+}
+
+func (s *SessionStreamingException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *SessionStreamingException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *SessionStreamingException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// This exception is returned in a Live Tail stream when the Live Tail session
+// times out. Live Tail sessions time out after three hours.
+type SessionTimeoutException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionTimeoutException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionTimeoutException) GoString() string {
+ return s.String()
+}
+
+// The SessionTimeoutException is an event in the StartLiveTailResponseStream group of events.
+func (s *SessionTimeoutException) eventStartLiveTailResponseStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SessionTimeoutException value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *SessionTimeoutException) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *SessionTimeoutException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+func newErrorSessionTimeoutException(v protocol.ResponseMetadata) error {
+ return &SessionTimeoutException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *SessionTimeoutException) Code() string {
+ return "SessionTimeoutException"
+}
+
+// Message returns the exception's message.
+func (s *SessionTimeoutException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *SessionTimeoutException) OrigErr() error {
+ return nil
+}
+
+func (s *SessionTimeoutException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *SessionTimeoutException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *SessionTimeoutException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartLiveTailInput struct { + _ struct{} `type:"structure"` + + // An optional pattern to use to filter the results to include only log events + // that match the pattern. For example, a filter pattern of error 404 causes + // only log events that include both error and 404 to be included in the Live + // Tail stream. + // + // Regular expression filter patterns are supported. + // + // For more information about filter pattern syntax, see Filter and Pattern + // Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). + LogEventFilterPattern *string `locationName:"logEventFilterPattern" type:"string"` + + // An array where each item in the array is a log group to include in the Live + // Tail session. + // + // Specify each log group by its ARN. + // + // If you specify an ARN, the ARN can't end with an asterisk (*). + // + // You can include up to 10 log groups. + // + // LogGroupIdentifiers is a required field + LogGroupIdentifiers []*string `locationName:"logGroupIdentifiers" min:"1" type:"list" required:"true"` + + // If you specify this parameter, then only log events in the log streams that + // have names that start with the prefixes that you specify here are included + // in the Live Tail session. + // + // If you specify this field, you can't also specify the logStreamNames field. + // + // You can specify this parameter only if you specify only one log group in + // logGroupIdentifiers. + LogStreamNamePrefixes []*string `locationName:"logStreamNamePrefixes" min:"1" type:"list"` + + // If you specify this parameter, then only log events in the log streams that + // you specify here are included in the Live Tail session. + // + // If you specify this field, you can't also specify the logStreamNamePrefixes + // field. 
+ // + // You can specify this parameter only if you specify only one log group in + // logGroupIdentifiers. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartLiveTailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartLiveTailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLiveTailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLiveTailInput"} + if s.LogGroupIdentifiers == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupIdentifiers")) + } + if s.LogGroupIdentifiers != nil && len(s.LogGroupIdentifiers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupIdentifiers", 1)) + } + if s.LogStreamNamePrefixes != nil && len(s.LogStreamNamePrefixes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefixes", 1)) + } + if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogEventFilterPattern sets the LogEventFilterPattern field's value. +func (s *StartLiveTailInput) SetLogEventFilterPattern(v string) *StartLiveTailInput { + s.LogEventFilterPattern = &v + return s +} + +// SetLogGroupIdentifiers sets the LogGroupIdentifiers field's value. 
+func (s *StartLiveTailInput) SetLogGroupIdentifiers(v []*string) *StartLiveTailInput {
+	s.LogGroupIdentifiers = v
+	return s
+}
+
+// SetLogStreamNamePrefixes sets the LogStreamNamePrefixes field's value.
+func (s *StartLiveTailInput) SetLogStreamNamePrefixes(v []*string) *StartLiveTailInput {
+	s.LogStreamNamePrefixes = v
+	return s
+}
+
+// SetLogStreamNames sets the LogStreamNames field's value.
+func (s *StartLiveTailInput) SetLogStreamNames(v []*string) *StartLiveTailInput {
+	s.LogStreamNames = v
+	return s
+}
+
+type StartLiveTailOutput struct {
+	_ struct{} `type:"structure"`
+
+	eventStream *StartLiveTailEventStream
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartLiveTailOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartLiveTailOutput) GoString() string {
+	return s.String()
+}
+
+// GetStream returns the type to interact with the event stream.
+func (s *StartLiveTailOutput) GetStream() *StartLiveTailEventStream {
+	return s.eventStream
+}
+
+// The StartLiveTailOutput is an event in the StartLiveTailResponseStream group of events.
+func (s *StartLiveTailOutput) eventStartLiveTailResponseStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StartLiveTailOutput value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StartLiveTailOutput) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *StartLiveTailOutput) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// StartLiveTailResponseStreamEvent groups together all EventStream +// events writes for StartLiveTailResponseStream. +// +// These events are: +// +// - LiveTailSessionStart +// - LiveTailSessionUpdate +type StartLiveTailResponseStreamEvent interface { + eventStartLiveTailResponseStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +// StartLiveTailResponseStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be StartLiveTailResponseStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// - LiveTailSessionStart +// - LiveTailSessionUpdate +// - StartLiveTailResponseStreamUnknownEvent +type StartLiveTailResponseStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan StartLiveTailResponseStreamEvent + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. 
+ Err() error +} + +type readStartLiveTailResponseStream struct { + eventReader *eventstreamapi.EventReader + stream chan StartLiveTailResponseStreamEvent + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func newReadStartLiveTailResponseStream(eventReader *eventstreamapi.EventReader) *readStartLiveTailResponseStream { + r := &readStartLiveTailResponseStream{ + eventReader: eventReader, + stream: make(chan StartLiveTailResponseStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + go r.readEventStream() + + return r +} + +// Close will close the underlying event stream reader. +func (r *readStartLiveTailResponseStream) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *readStartLiveTailResponseStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readStartLiveTailResponseStream) Closed() <-chan struct{} { + return r.done +} + +func (r *readStartLiveTailResponseStream) safeClose() { + close(r.done) +} + +func (r *readStartLiveTailResponseStream) Err() error { + return r.err.Err() +} + +func (r *readStartLiveTailResponseStream) Events() <-chan StartLiveTailResponseStreamEvent { + return r.stream +} + +func (r *readStartLiveTailResponseStream) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.(StartLiveTailResponseStreamEvent): + case <-r.done: + return + } + } +} + +type unmarshalerForStartLiveTailResponseStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForStartLiveTailResponseStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, 
 error) {
+	switch eventType {
+	case "sessionStart":
+		return &LiveTailSessionStart{}, nil
+	case "sessionUpdate":
+		return &LiveTailSessionUpdate{}, nil
+	case "SessionStreamingException":
+		return newErrorSessionStreamingException(u.metadata).(eventstreamapi.Unmarshaler), nil
+	case "SessionTimeoutException":
+		return newErrorSessionTimeoutException(u.metadata).(eventstreamapi.Unmarshaler), nil
+	default:
+		return &StartLiveTailResponseStreamUnknownEvent{Type: eventType}, nil
+	}
+}
+
+// StartLiveTailResponseStreamUnknownEvent provides a failsafe event for the
+// StartLiveTailResponseStream group of events when an unknown event is received.
+type StartLiveTailResponseStreamUnknownEvent struct {
+	Type    string
+	Message eventstream.Message
+}
+
+// The StartLiveTailResponseStreamUnknownEvent is an event in the StartLiveTailResponseStream
+// group of events.
+func (s *StartLiveTailResponseStreamUnknownEvent) eventStartLiveTailResponseStream() {}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (e *StartLiveTailResponseStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
+	msg eventstream.Message, err error,
+) {
+	return e.Message.Clone(), nil
+}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StartLiveTailResponseStream value.
+// This method is only used internally within the SDK's EventStream handling.
+func (e *StartLiveTailResponseStreamUnknownEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	e.Message = msg.Clone()
+	return nil
+}
+
+type StartQueryInput struct {
+	_ struct{} `type:"structure"`
+
+	// The end of the time range to query. The range is inclusive, so the specified
+	// end time is included in the query. Specified as epoch time, the number of
+	// seconds since January 1, 1970, 00:00:00 UTC.
+ // + // EndTime is a required field + EndTime *int64 `locationName:"endTime" type:"long" required:"true"` + + // The maximum number of log events to return in the query. If the query string + // uses the fields command, only the specified fields and their values are returned. + // The default is 1000. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The list of log groups to query. You can include up to 50 log groups. + // + // You can specify them by the log group name or ARN. If a log group that you're + // querying is in a source account and you're using a monitoring account, you + // must specify the ARN of the log group here. The query definition must also + // be defined in the monitoring account. + // + // If you specify an ARN, the ARN can't end with an asterisk (*). + // + // A StartQuery operation must include exactly one of the following parameters: + // logGroupName, logGroupNames, or logGroupIdentifiers. + LogGroupIdentifiers []*string `locationName:"logGroupIdentifiers" type:"list"` + + // The log group on which to perform the query. + // + // A StartQuery operation must include exactly one of the following parameters: + // logGroupName, logGroupNames, or logGroupIdentifiers. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The list of log groups to be queried. You can include up to 50 log groups. + // + // A StartQuery operation must include exactly one of the following parameters: + // logGroupName, logGroupNames, or logGroupIdentifiers. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // The query string to use. For more information, see CloudWatch Logs Insights + // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + // + // QueryString is a required field + QueryString *string `locationName:"queryString" type:"string" required:"true"` + + // The beginning of the time range to query. 
The range is inclusive, so the + // specified start time is included in the query. Specified as epoch time, the + // number of seconds since January 1, 1970, 00:00:00 UTC. + // + // StartTime is a required field + StartTime *int64 `locationName:"startTime" type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartQueryInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *StartQueryInput) SetEndTime(v int64) *StartQueryInput { + s.EndTime = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *StartQueryInput) SetLimit(v int64) *StartQueryInput { + s.Limit = &v + return s +} + +// SetLogGroupIdentifiers sets the LogGroupIdentifiers field's value. +func (s *StartQueryInput) SetLogGroupIdentifiers(v []*string) *StartQueryInput { + s.LogGroupIdentifiers = v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *StartQueryInput) SetLogGroupName(v string) *StartQueryInput { + s.LogGroupName = &v + return s +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *StartQueryInput) SetLogGroupNames(v []*string) *StartQueryInput { + s.LogGroupNames = v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *StartQueryInput) SetQueryString(v string) *StartQueryInput { + s.QueryString = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *StartQueryInput) SetStartTime(v int64) *StartQueryInput { + s.StartTime = &v + return s +} + +type StartQueryOutput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query. + QueryId *string `locationName:"queryId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartQueryOutput) GoString() string { + return s.String() +} + +// SetQueryId sets the QueryId field's value. 
+func (s *StartQueryOutput) SetQueryId(v string) *StartQueryOutput { + s.QueryId = &v + return s +} + +type StopQueryInput struct { + _ struct{} `type:"structure"` + + // The ID number of the query to stop. To find this ID number, use DescribeQueries. + // + // QueryId is a required field + QueryId *string `locationName:"queryId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopQueryInput"} + if s.QueryId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryId sets the QueryId field's value. +func (s *StopQueryInput) SetQueryId(v string) *StopQueryInput { + s.QueryId = &v + return s +} + +type StopQueryOutput struct { + _ struct{} `type:"structure"` + + // This is true if the query was stopped by the StopQuery operation. + Success *bool `locationName:"success" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s StopQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopQueryOutput) GoString() string { + return s.String() +} + +// SetSuccess sets the Success field's value. +func (s *StopQueryOutput) SetSuccess(v bool) *StopQueryOutput { + s.Success = &v + return s +} + +// Represents a subscription filter. +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // The creation time of the subscription filter, expressed as the number of + // milliseconds after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The Amazon Resource Name (ARN) of the destination. + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // The method used to distribute log data to the destination, which can be either + // random or grouped by log stream. + Distribution *string `locationName:"distribution" type:"string" enum:"Distribution"` + + // The name of the subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SubscriptionFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SubscriptionFilter) GoString() string {
+	return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *SubscriptionFilter) SetCreationTime(v int64) *SubscriptionFilter {
+	s.CreationTime = &v
+	return s
+}
+
+// SetDestinationArn sets the DestinationArn field's value.
+func (s *SubscriptionFilter) SetDestinationArn(v string) *SubscriptionFilter {
+	s.DestinationArn = &v
+	return s
+}
+
+// SetDistribution sets the Distribution field's value.
+func (s *SubscriptionFilter) SetDistribution(v string) *SubscriptionFilter {
+	s.Distribution = &v
+	return s
+}
+
+// SetFilterName sets the FilterName field's value.
+func (s *SubscriptionFilter) SetFilterName(v string) *SubscriptionFilter {
+	s.FilterName = &v
+	return s
+}
+
+// SetFilterPattern sets the FilterPattern field's value.
+func (s *SubscriptionFilter) SetFilterPattern(v string) *SubscriptionFilter {
+	s.FilterPattern = &v
+	return s
+}
+
+// SetLogGroupName sets the LogGroupName field's value.
+func (s *SubscriptionFilter) SetLogGroupName(v string) *SubscriptionFilter {
+	s.LogGroupName = &v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *SubscriptionFilter) SetRoleArn(v string) *SubscriptionFilter {
+	s.RoleArn = &v
+	return s
+}
+
+// If you are suppressing an anomaly temporarily, this structure defines how
+// long the suppression period is to be.
+type SuppressionPeriod struct { + _ struct{} `type:"structure"` + + // Specifies whether the value of value is in seconds, minutes, or hours. + SuppressionUnit *string `locationName:"suppressionUnit" type:"string" enum:"SuppressionUnit"` + + // Specifies the number of seconds, minutes or hours to suppress this anomaly. + // There is no maximum. + Value *int64 `locationName:"value" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuppressionPeriod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuppressionPeriod) GoString() string { + return s.String() +} + +// SetSuppressionUnit sets the SuppressionUnit field's value. +func (s *SuppressionPeriod) SetSuppressionUnit(v string) *SuppressionPeriod { + s.SuppressionUnit = &v + return s +} + +// SetValue sets the Value field's value. +func (s *SuppressionPeriod) SetValue(v int64) *SuppressionPeriod { + s.Value = &v + return s +} + +// Deprecated: Please use the generic tagging API model TagResourceRequest +type TagLogGroupInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The key-value pairs to use for the tags. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *TagLogGroupInput) SetLogGroupName(v string) *TagLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagLogGroupInput) SetTags(v map[string]*string) *TagLogGroupInput { + s.Tags = v + return s +} + +type TagLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TagLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagLogGroupOutput) GoString() string { + return s.String() +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource that you're adding tags to. + // + // The ARN format of a log group is arn:aws:logs:Region:account-id:log-group:log-group-name + // + // The ARN format of a destination is arn:aws:logs:Region:account-id:destination:destination-name + // + // For more information about ARN format, see CloudWatch Logs resources and + // operations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html). + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The list of key-value pairs to associate with the resource. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type TestMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. 
+ // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The log event messages to test. + // + // LogEventMessages is a required field + LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TestMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TestMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"} + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogEventMessages == nil { + invalidParams.Add(request.NewErrParamRequired("LogEventMessages")) + } + if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *TestMetricFilterInput) SetFilterPattern(v string) *TestMetricFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogEventMessages sets the LogEventMessages field's value. 
+func (s *TestMetricFilterInput) SetLogEventMessages(v []*string) *TestMetricFilterInput { + s.LogEventMessages = v + return s +} + +type TestMetricFilterOutput struct { + _ struct{} `type:"structure"` + + // The matched events. + Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TestMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TestMetricFilterOutput) GoString() string { + return s.String() +} + +// SetMatches sets the Matches field's value. +func (s *TestMetricFilterOutput) SetMatches(v []*MetricFilterMatchRecord) *TestMetricFilterOutput { + s.Matches = v + return s +} + +// The request was throttled because of quota limits. +type ThrottlingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ThrottlingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ThrottlingException) GoString() string { + return s.String() +} + +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ThrottlingException) Code() string { + return "ThrottlingException" +} + +// Message returns the exception's message. +func (s *ThrottlingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ThrottlingException) OrigErr() error { + return nil +} + +func (s *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A resource can have no more than 50 tags. +type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + // The name of the resource. + ResourceName *string `locationName:"resourceName" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The most likely cause is an Amazon Web Services access key ID or secret key +// that's not valid. +type UnrecognizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnrecognizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UnrecognizedClientException) GoString() string { + return s.String() +} + +func newErrorUnrecognizedClientException(v protocol.ResponseMetadata) error { + return &UnrecognizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnrecognizedClientException) Code() string { + return "UnrecognizedClientException" +} + +// Message returns the exception's message. +func (s *UnrecognizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnrecognizedClientException) OrigErr() error { + return nil +} + +func (s *UnrecognizedClientException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnrecognizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnrecognizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Deprecated: Please use the generic tagging API model UntagResourceRequest +type UntagLogGroupInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The tag keys. The corresponding tags are removed from the log group. + // + // Tags is a required field + Tags []*string `locationName:"tags" min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UntagLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *UntagLogGroupInput) SetLogGroupName(v string) *UntagLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *UntagLogGroupInput) SetTags(v []*string) *UntagLogGroupInput { + s.Tags = v + return s +} + +type UntagLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagLogGroupOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the CloudWatch Logs resource that you're removing tags from. + // + // The ARN format of a log group is arn:aws:logs:Region:account-id:log-group:log-group-name + // + // The ARN format of a destination is arn:aws:logs:Region:account-id:destination:destination-name + // + // For more information about ARN format, see CloudWatch Logs resources and + // operations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html). + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The list of tag keys to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateAnomalyInput struct { + _ struct{} `type:"structure"` + + // The ARN of the anomaly detector that this operation is to act on. + // + // AnomalyDetectorArn is a required field + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string" required:"true"` + + // If you are suppressing or unsuppressing an anomaly, specify its unique ID + // here. 
You can find anomaly IDs by using the ListAnomalies (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListAnomalies.html) + // operation. + AnomalyId *string `locationName:"anomalyId" min:"36" type:"string"` + + // If you are suppressing or unsuppressing an pattern, specify its unique ID + // here. You can find pattern IDs by using the ListAnomalies (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListAnomalies.html) + // operation. + PatternId *string `locationName:"patternId" min:"32" type:"string"` + + // If you are temporarily suppressing an anomaly or pattern, use this structure + // to specify how long the suppression is to last. + SuppressionPeriod *SuppressionPeriod `locationName:"suppressionPeriod" type:"structure"` + + // Use this to specify whether the suppression to be temporary or infinite. + // If you specify LIMITED, you must also specify a suppressionPeriod. If you + // specify INFINITE, any value for suppressionPeriod is ignored. + SuppressionType *string `locationName:"suppressionType" type:"string" enum:"SuppressionType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateAnomalyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateAnomalyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAnomalyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAnomalyInput"} + if s.AnomalyDetectorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AnomalyDetectorArn")) + } + if s.AnomalyDetectorArn != nil && len(*s.AnomalyDetectorArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyDetectorArn", 1)) + } + if s.AnomalyId != nil && len(*s.AnomalyId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyId", 36)) + } + if s.PatternId != nil && len(*s.PatternId) < 32 { + invalidParams.Add(request.NewErrParamMinLen("PatternId", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *UpdateAnomalyInput) SetAnomalyDetectorArn(v string) *UpdateAnomalyInput { + s.AnomalyDetectorArn = &v + return s +} + +// SetAnomalyId sets the AnomalyId field's value. +func (s *UpdateAnomalyInput) SetAnomalyId(v string) *UpdateAnomalyInput { + s.AnomalyId = &v + return s +} + +// SetPatternId sets the PatternId field's value. +func (s *UpdateAnomalyInput) SetPatternId(v string) *UpdateAnomalyInput { + s.PatternId = &v + return s +} + +// SetSuppressionPeriod sets the SuppressionPeriod field's value. +func (s *UpdateAnomalyInput) SetSuppressionPeriod(v *SuppressionPeriod) *UpdateAnomalyInput { + s.SuppressionPeriod = v + return s +} + +// SetSuppressionType sets the SuppressionType field's value. +func (s *UpdateAnomalyInput) SetSuppressionType(v string) *UpdateAnomalyInput { + s.SuppressionType = &v + return s +} + +type UpdateAnomalyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateAnomalyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateAnomalyOutput) GoString() string { + return s.String() +} + +type UpdateLogAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The ARN of the anomaly detector that you want to update. + // + // AnomalyDetectorArn is a required field + AnomalyDetectorArn *string `locationName:"anomalyDetectorArn" min:"1" type:"string" required:"true"` + + // The number of days to use as the life cycle of anomalies. After this time, + // anomalies are automatically baselined and the anomaly detector model will + // treat new occurrences of similar event as normal. Therefore, if you do not + // correct the cause of an anomaly during this time, it will be considered normal + // going forward and will not be detected. + AnomalyVisibilityTime *int64 `locationName:"anomalyVisibilityTime" min:"7" type:"long"` + + // Use this parameter to pause or restart the anomaly detector. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + // Specifies how often the anomaly detector runs and look for anomalies. Set + // this value according to the frequency that the log group receives new logs. + // For example, if the log group receives new log events every 10 minutes, then + // setting evaluationFrequency to FIFTEEN_MIN might be appropriate. + EvaluationFrequency *string `locationName:"evaluationFrequency" type:"string" enum:"EvaluationFrequency"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. 
You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLogAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLogAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLogAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLogAnomalyDetectorInput"} + if s.AnomalyDetectorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AnomalyDetectorArn")) + } + if s.AnomalyDetectorArn != nil && len(*s.AnomalyDetectorArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnomalyDetectorArn", 1)) + } + if s.AnomalyVisibilityTime != nil && *s.AnomalyVisibilityTime < 7 { + invalidParams.Add(request.NewErrParamMinValue("AnomalyVisibilityTime", 7)) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorArn sets the AnomalyDetectorArn field's value. +func (s *UpdateLogAnomalyDetectorInput) SetAnomalyDetectorArn(v string) *UpdateLogAnomalyDetectorInput { + s.AnomalyDetectorArn = &v + return s +} + +// SetAnomalyVisibilityTime sets the AnomalyVisibilityTime field's value. 
+func (s *UpdateLogAnomalyDetectorInput) SetAnomalyVisibilityTime(v int64) *UpdateLogAnomalyDetectorInput { + s.AnomalyVisibilityTime = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *UpdateLogAnomalyDetectorInput) SetEnabled(v bool) *UpdateLogAnomalyDetectorInput { + s.Enabled = &v + return s +} + +// SetEvaluationFrequency sets the EvaluationFrequency field's value. +func (s *UpdateLogAnomalyDetectorInput) SetEvaluationFrequency(v string) *UpdateLogAnomalyDetectorInput { + s.EvaluationFrequency = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *UpdateLogAnomalyDetectorInput) SetFilterPattern(v string) *UpdateLogAnomalyDetectorInput { + s.FilterPattern = &v + return s +} + +type UpdateLogAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLogAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLogAnomalyDetectorOutput) GoString() string { + return s.String() +} + +// One of the parameters for the request is not valid. +type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // AnomalyDetectorStatusInitializing is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusInitializing = "INITIALIZING" + + // AnomalyDetectorStatusTraining is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusTraining = "TRAINING" + + // AnomalyDetectorStatusAnalyzing is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusAnalyzing = "ANALYZING" + + // AnomalyDetectorStatusFailed is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusFailed = "FAILED" + + // AnomalyDetectorStatusDeleted is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusDeleted = "DELETED" + + // AnomalyDetectorStatusPaused is a AnomalyDetectorStatus enum value + AnomalyDetectorStatusPaused = "PAUSED" +) + +// AnomalyDetectorStatus_Values returns all elements of the AnomalyDetectorStatus enum +func AnomalyDetectorStatus_Values() []string { + return []string{ + AnomalyDetectorStatusInitializing, + AnomalyDetectorStatusTraining, + AnomalyDetectorStatusAnalyzing, + AnomalyDetectorStatusFailed, + AnomalyDetectorStatusDeleted, + AnomalyDetectorStatusPaused, + } +} + +const ( + // DataProtectionStatusActivated is a DataProtectionStatus enum value + DataProtectionStatusActivated = "ACTIVATED" + + // DataProtectionStatusDeleted is a DataProtectionStatus enum value + DataProtectionStatusDeleted = "DELETED" + + // DataProtectionStatusArchived is a DataProtectionStatus enum value + DataProtectionStatusArchived = "ARCHIVED" + + // DataProtectionStatusDisabled is a DataProtectionStatus enum value + DataProtectionStatusDisabled = "DISABLED" +) + +// DataProtectionStatus_Values returns all elements of the DataProtectionStatus enum +func DataProtectionStatus_Values() []string { + return []string{ + DataProtectionStatusActivated, + DataProtectionStatusDeleted, + DataProtectionStatusArchived, + DataProtectionStatusDisabled, + } +} + +const ( + // DeliveryDestinationTypeS3 is a 
DeliveryDestinationType enum value + DeliveryDestinationTypeS3 = "S3" + + // DeliveryDestinationTypeCwl is a DeliveryDestinationType enum value + DeliveryDestinationTypeCwl = "CWL" + + // DeliveryDestinationTypeFh is a DeliveryDestinationType enum value + DeliveryDestinationTypeFh = "FH" +) + +// DeliveryDestinationType_Values returns all elements of the DeliveryDestinationType enum +func DeliveryDestinationType_Values() []string { + return []string{ + DeliveryDestinationTypeS3, + DeliveryDestinationTypeCwl, + DeliveryDestinationTypeFh, + } +} + +// The method used to distribute log data to the destination, which can be either +// random or grouped by log stream. +const ( + // DistributionRandom is a Distribution enum value + DistributionRandom = "Random" + + // DistributionByLogStream is a Distribution enum value + DistributionByLogStream = "ByLogStream" +) + +// Distribution_Values returns all elements of the Distribution enum +func Distribution_Values() []string { + return []string{ + DistributionRandom, + DistributionByLogStream, + } +} + +const ( + // EntityRejectionErrorTypeInvalidEntity is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeInvalidEntity = "InvalidEntity" + + // EntityRejectionErrorTypeInvalidTypeValue is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeInvalidTypeValue = "InvalidTypeValue" + + // EntityRejectionErrorTypeInvalidKeyAttributes is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeInvalidKeyAttributes = "InvalidKeyAttributes" + + // EntityRejectionErrorTypeInvalidAttributes is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeInvalidAttributes = "InvalidAttributes" + + // EntityRejectionErrorTypeEntitySizeTooLarge is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeEntitySizeTooLarge = "EntitySizeTooLarge" + + // EntityRejectionErrorTypeUnsupportedLogGroupType is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeUnsupportedLogGroupType = 
"UnsupportedLogGroupType" + + // EntityRejectionErrorTypeMissingRequiredFields is a EntityRejectionErrorType enum value + EntityRejectionErrorTypeMissingRequiredFields = "MissingRequiredFields" +) + +// EntityRejectionErrorType_Values returns all elements of the EntityRejectionErrorType enum +func EntityRejectionErrorType_Values() []string { + return []string{ + EntityRejectionErrorTypeInvalidEntity, + EntityRejectionErrorTypeInvalidTypeValue, + EntityRejectionErrorTypeInvalidKeyAttributes, + EntityRejectionErrorTypeInvalidAttributes, + EntityRejectionErrorTypeEntitySizeTooLarge, + EntityRejectionErrorTypeUnsupportedLogGroupType, + EntityRejectionErrorTypeMissingRequiredFields, + } +} + +const ( + // EvaluationFrequencyOneMin is a EvaluationFrequency enum value + EvaluationFrequencyOneMin = "ONE_MIN" + + // EvaluationFrequencyFiveMin is a EvaluationFrequency enum value + EvaluationFrequencyFiveMin = "FIVE_MIN" + + // EvaluationFrequencyTenMin is a EvaluationFrequency enum value + EvaluationFrequencyTenMin = "TEN_MIN" + + // EvaluationFrequencyFifteenMin is a EvaluationFrequency enum value + EvaluationFrequencyFifteenMin = "FIFTEEN_MIN" + + // EvaluationFrequencyThirtyMin is a EvaluationFrequency enum value + EvaluationFrequencyThirtyMin = "THIRTY_MIN" + + // EvaluationFrequencyOneHour is a EvaluationFrequency enum value + EvaluationFrequencyOneHour = "ONE_HOUR" +) + +// EvaluationFrequency_Values returns all elements of the EvaluationFrequency enum +func EvaluationFrequency_Values() []string { + return []string{ + EvaluationFrequencyOneMin, + EvaluationFrequencyFiveMin, + EvaluationFrequencyTenMin, + EvaluationFrequencyFifteenMin, + EvaluationFrequencyThirtyMin, + EvaluationFrequencyOneHour, + } +} + +const ( + // ExportTaskStatusCodeCancelled is a ExportTaskStatusCode enum value + ExportTaskStatusCodeCancelled = "CANCELLED" + + // ExportTaskStatusCodeCompleted is a ExportTaskStatusCode enum value + ExportTaskStatusCodeCompleted = "COMPLETED" + + // 
ExportTaskStatusCodeFailed is a ExportTaskStatusCode enum value + ExportTaskStatusCodeFailed = "FAILED" + + // ExportTaskStatusCodePending is a ExportTaskStatusCode enum value + ExportTaskStatusCodePending = "PENDING" + + // ExportTaskStatusCodePendingCancel is a ExportTaskStatusCode enum value + ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" + + // ExportTaskStatusCodeRunning is a ExportTaskStatusCode enum value + ExportTaskStatusCodeRunning = "RUNNING" +) + +// ExportTaskStatusCode_Values returns all elements of the ExportTaskStatusCode enum +func ExportTaskStatusCode_Values() []string { + return []string{ + ExportTaskStatusCodeCancelled, + ExportTaskStatusCodeCompleted, + ExportTaskStatusCodeFailed, + ExportTaskStatusCodePending, + ExportTaskStatusCodePendingCancel, + ExportTaskStatusCodeRunning, + } +} + +const ( + // InheritedPropertyAccountDataProtection is a InheritedProperty enum value + InheritedPropertyAccountDataProtection = "ACCOUNT_DATA_PROTECTION" +) + +// InheritedProperty_Values returns all elements of the InheritedProperty enum +func InheritedProperty_Values() []string { + return []string{ + InheritedPropertyAccountDataProtection, + } +} + +const ( + // LogGroupClassStandard is a LogGroupClass enum value + LogGroupClassStandard = "STANDARD" + + // LogGroupClassInfrequentAccess is a LogGroupClass enum value + LogGroupClassInfrequentAccess = "INFREQUENT_ACCESS" +) + +// LogGroupClass_Values returns all elements of the LogGroupClass enum +func LogGroupClass_Values() []string { + return []string{ + LogGroupClassStandard, + LogGroupClassInfrequentAccess, + } +} + +const ( + // OrderByLogStreamName is a OrderBy enum value + OrderByLogStreamName = "LogStreamName" + + // OrderByLastEventTime is a OrderBy enum value + OrderByLastEventTime = "LastEventTime" +) + +// OrderBy_Values returns all elements of the OrderBy enum +func OrderBy_Values() []string { + return []string{ + OrderByLogStreamName, + OrderByLastEventTime, + } +} + +const ( + // 
OutputFormatJson is a OutputFormat enum value + OutputFormatJson = "json" + + // OutputFormatPlain is a OutputFormat enum value + OutputFormatPlain = "plain" + + // OutputFormatW3c is a OutputFormat enum value + OutputFormatW3c = "w3c" + + // OutputFormatRaw is a OutputFormat enum value + OutputFormatRaw = "raw" + + // OutputFormatParquet is a OutputFormat enum value + OutputFormatParquet = "parquet" +) + +// OutputFormat_Values returns all elements of the OutputFormat enum +func OutputFormat_Values() []string { + return []string{ + OutputFormatJson, + OutputFormatPlain, + OutputFormatW3c, + OutputFormatRaw, + OutputFormatParquet, + } +} + +const ( + // PolicyTypeDataProtectionPolicy is a PolicyType enum value + PolicyTypeDataProtectionPolicy = "DATA_PROTECTION_POLICY" + + // PolicyTypeSubscriptionFilterPolicy is a PolicyType enum value + PolicyTypeSubscriptionFilterPolicy = "SUBSCRIPTION_FILTER_POLICY" +) + +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeDataProtectionPolicy, + PolicyTypeSubscriptionFilterPolicy, + } +} + +const ( + // QueryStatusScheduled is a QueryStatus enum value + QueryStatusScheduled = "Scheduled" + + // QueryStatusRunning is a QueryStatus enum value + QueryStatusRunning = "Running" + + // QueryStatusComplete is a QueryStatus enum value + QueryStatusComplete = "Complete" + + // QueryStatusFailed is a QueryStatus enum value + QueryStatusFailed = "Failed" + + // QueryStatusCancelled is a QueryStatus enum value + QueryStatusCancelled = "Cancelled" + + // QueryStatusTimeout is a QueryStatus enum value + QueryStatusTimeout = "Timeout" + + // QueryStatusUnknown is a QueryStatus enum value + QueryStatusUnknown = "Unknown" +) + +// QueryStatus_Values returns all elements of the QueryStatus enum +func QueryStatus_Values() []string { + return []string{ + QueryStatusScheduled, + QueryStatusRunning, + QueryStatusComplete, + QueryStatusFailed, + QueryStatusCancelled, + 
QueryStatusTimeout, + QueryStatusUnknown, + } +} + +const ( + // ScopeAll is a Scope enum value + ScopeAll = "ALL" +) + +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeAll, + } +} + +const ( + // StandardUnitSeconds is a StandardUnit enum value + StandardUnitSeconds = "Seconds" + + // StandardUnitMicroseconds is a StandardUnit enum value + StandardUnitMicroseconds = "Microseconds" + + // StandardUnitMilliseconds is a StandardUnit enum value + StandardUnitMilliseconds = "Milliseconds" + + // StandardUnitBytes is a StandardUnit enum value + StandardUnitBytes = "Bytes" + + // StandardUnitKilobytes is a StandardUnit enum value + StandardUnitKilobytes = "Kilobytes" + + // StandardUnitMegabytes is a StandardUnit enum value + StandardUnitMegabytes = "Megabytes" + + // StandardUnitGigabytes is a StandardUnit enum value + StandardUnitGigabytes = "Gigabytes" + + // StandardUnitTerabytes is a StandardUnit enum value + StandardUnitTerabytes = "Terabytes" + + // StandardUnitBits is a StandardUnit enum value + StandardUnitBits = "Bits" + + // StandardUnitKilobits is a StandardUnit enum value + StandardUnitKilobits = "Kilobits" + + // StandardUnitMegabits is a StandardUnit enum value + StandardUnitMegabits = "Megabits" + + // StandardUnitGigabits is a StandardUnit enum value + StandardUnitGigabits = "Gigabits" + + // StandardUnitTerabits is a StandardUnit enum value + StandardUnitTerabits = "Terabits" + + // StandardUnitPercent is a StandardUnit enum value + StandardUnitPercent = "Percent" + + // StandardUnitCount is a StandardUnit enum value + StandardUnitCount = "Count" + + // StandardUnitBytesSecond is a StandardUnit enum value + StandardUnitBytesSecond = "Bytes/Second" + + // StandardUnitKilobytesSecond is a StandardUnit enum value + StandardUnitKilobytesSecond = "Kilobytes/Second" + + // StandardUnitMegabytesSecond is a StandardUnit enum value + StandardUnitMegabytesSecond = "Megabytes/Second" + + // 
StandardUnitGigabytesSecond is a StandardUnit enum value + StandardUnitGigabytesSecond = "Gigabytes/Second" + + // StandardUnitTerabytesSecond is a StandardUnit enum value + StandardUnitTerabytesSecond = "Terabytes/Second" + + // StandardUnitBitsSecond is a StandardUnit enum value + StandardUnitBitsSecond = "Bits/Second" + + // StandardUnitKilobitsSecond is a StandardUnit enum value + StandardUnitKilobitsSecond = "Kilobits/Second" + + // StandardUnitMegabitsSecond is a StandardUnit enum value + StandardUnitMegabitsSecond = "Megabits/Second" + + // StandardUnitGigabitsSecond is a StandardUnit enum value + StandardUnitGigabitsSecond = "Gigabits/Second" + + // StandardUnitTerabitsSecond is a StandardUnit enum value + StandardUnitTerabitsSecond = "Terabits/Second" + + // StandardUnitCountSecond is a StandardUnit enum value + StandardUnitCountSecond = "Count/Second" + + // StandardUnitNone is a StandardUnit enum value + StandardUnitNone = "None" +) + +// StandardUnit_Values returns all elements of the StandardUnit enum +func StandardUnit_Values() []string { + return []string{ + StandardUnitSeconds, + StandardUnitMicroseconds, + StandardUnitMilliseconds, + StandardUnitBytes, + StandardUnitKilobytes, + StandardUnitMegabytes, + StandardUnitGigabytes, + StandardUnitTerabytes, + StandardUnitBits, + StandardUnitKilobits, + StandardUnitMegabits, + StandardUnitGigabits, + StandardUnitTerabits, + StandardUnitPercent, + StandardUnitCount, + StandardUnitBytesSecond, + StandardUnitKilobytesSecond, + StandardUnitMegabytesSecond, + StandardUnitGigabytesSecond, + StandardUnitTerabytesSecond, + StandardUnitBitsSecond, + StandardUnitKilobitsSecond, + StandardUnitMegabitsSecond, + StandardUnitGigabitsSecond, + StandardUnitTerabitsSecond, + StandardUnitCountSecond, + StandardUnitNone, + } +} + +const ( + // StateActive is a State enum value + StateActive = "Active" + + // StateSuppressed is a State enum value + StateSuppressed = "Suppressed" + + // StateBaseline is a State enum value + 
StateBaseline = "Baseline" +) + +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StateActive, + StateSuppressed, + StateBaseline, + } +} + +const ( + // SuppressionStateSuppressed is a SuppressionState enum value + SuppressionStateSuppressed = "SUPPRESSED" + + // SuppressionStateUnsuppressed is a SuppressionState enum value + SuppressionStateUnsuppressed = "UNSUPPRESSED" +) + +// SuppressionState_Values returns all elements of the SuppressionState enum +func SuppressionState_Values() []string { + return []string{ + SuppressionStateSuppressed, + SuppressionStateUnsuppressed, + } +} + +const ( + // SuppressionTypeLimited is a SuppressionType enum value + SuppressionTypeLimited = "LIMITED" + + // SuppressionTypeInfinite is a SuppressionType enum value + SuppressionTypeInfinite = "INFINITE" +) + +// SuppressionType_Values returns all elements of the SuppressionType enum +func SuppressionType_Values() []string { + return []string{ + SuppressionTypeLimited, + SuppressionTypeInfinite, + } +} + +const ( + // SuppressionUnitSeconds is a SuppressionUnit enum value + SuppressionUnitSeconds = "SECONDS" + + // SuppressionUnitMinutes is a SuppressionUnit enum value + SuppressionUnitMinutes = "MINUTES" + + // SuppressionUnitHours is a SuppressionUnit enum value + SuppressionUnitHours = "HOURS" +) + +// SuppressionUnit_Values returns all elements of the SuppressionUnit enum +func SuppressionUnit_Values() []string { + return []string{ + SuppressionUnitSeconds, + SuppressionUnitMinutes, + SuppressionUnitHours, + } +} diff --git a/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go b/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go new file mode 100644 index 0000000000..d285cbadf3 --- /dev/null +++ b/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go @@ -0,0 +1,396 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +// Package cloudwatchlogsiface provides an interface to enable mocking the Amazon CloudWatch Logs service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package cloudwatchlogsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +// CloudWatchLogsAPI provides an interface to enable mocking the +// cloudwatchlogs.CloudWatchLogs service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon CloudWatch Logs. +// func myFunc(svc cloudwatchlogsiface.CloudWatchLogsAPI) bool { +// // Make svc.AssociateKmsKey request +// } +// +// func main() { +// sess := session.New() +// svc := cloudwatchlogs.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockCloudWatchLogsClient struct { +// cloudwatchlogsiface.CloudWatchLogsAPI +// } +// func (m *mockCloudWatchLogsClient) AssociateKmsKey(input *cloudwatchlogs.AssociateKmsKeyInput) (*cloudwatchlogs.AssociateKmsKeyOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockCloudWatchLogsClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type CloudWatchLogsAPI interface { + AssociateKmsKey(*cloudwatchlogs.AssociateKmsKeyInput) (*cloudwatchlogs.AssociateKmsKeyOutput, error) + AssociateKmsKeyWithContext(aws.Context, *cloudwatchlogs.AssociateKmsKeyInput, ...request.Option) (*cloudwatchlogs.AssociateKmsKeyOutput, error) + AssociateKmsKeyRequest(*cloudwatchlogs.AssociateKmsKeyInput) (*request.Request, *cloudwatchlogs.AssociateKmsKeyOutput) + + CancelExportTask(*cloudwatchlogs.CancelExportTaskInput) (*cloudwatchlogs.CancelExportTaskOutput, error) + CancelExportTaskWithContext(aws.Context, *cloudwatchlogs.CancelExportTaskInput, ...request.Option) (*cloudwatchlogs.CancelExportTaskOutput, error) + CancelExportTaskRequest(*cloudwatchlogs.CancelExportTaskInput) (*request.Request, *cloudwatchlogs.CancelExportTaskOutput) + + CreateDelivery(*cloudwatchlogs.CreateDeliveryInput) (*cloudwatchlogs.CreateDeliveryOutput, error) + CreateDeliveryWithContext(aws.Context, *cloudwatchlogs.CreateDeliveryInput, ...request.Option) (*cloudwatchlogs.CreateDeliveryOutput, error) + CreateDeliveryRequest(*cloudwatchlogs.CreateDeliveryInput) (*request.Request, *cloudwatchlogs.CreateDeliveryOutput) + + CreateExportTask(*cloudwatchlogs.CreateExportTaskInput) (*cloudwatchlogs.CreateExportTaskOutput, 
error) + CreateExportTaskWithContext(aws.Context, *cloudwatchlogs.CreateExportTaskInput, ...request.Option) (*cloudwatchlogs.CreateExportTaskOutput, error) + CreateExportTaskRequest(*cloudwatchlogs.CreateExportTaskInput) (*request.Request, *cloudwatchlogs.CreateExportTaskOutput) + + CreateLogAnomalyDetector(*cloudwatchlogs.CreateLogAnomalyDetectorInput) (*cloudwatchlogs.CreateLogAnomalyDetectorOutput, error) + CreateLogAnomalyDetectorWithContext(aws.Context, *cloudwatchlogs.CreateLogAnomalyDetectorInput, ...request.Option) (*cloudwatchlogs.CreateLogAnomalyDetectorOutput, error) + CreateLogAnomalyDetectorRequest(*cloudwatchlogs.CreateLogAnomalyDetectorInput) (*request.Request, *cloudwatchlogs.CreateLogAnomalyDetectorOutput) + + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + CreateLogGroupWithContext(aws.Context, *cloudwatchlogs.CreateLogGroupInput, ...request.Option) (*cloudwatchlogs.CreateLogGroupOutput, error) + CreateLogGroupRequest(*cloudwatchlogs.CreateLogGroupInput) (*request.Request, *cloudwatchlogs.CreateLogGroupOutput) + + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + CreateLogStreamWithContext(aws.Context, *cloudwatchlogs.CreateLogStreamInput, ...request.Option) (*cloudwatchlogs.CreateLogStreamOutput, error) + CreateLogStreamRequest(*cloudwatchlogs.CreateLogStreamInput) (*request.Request, *cloudwatchlogs.CreateLogStreamOutput) + + DeleteAccountPolicy(*cloudwatchlogs.DeleteAccountPolicyInput) (*cloudwatchlogs.DeleteAccountPolicyOutput, error) + DeleteAccountPolicyWithContext(aws.Context, *cloudwatchlogs.DeleteAccountPolicyInput, ...request.Option) (*cloudwatchlogs.DeleteAccountPolicyOutput, error) + DeleteAccountPolicyRequest(*cloudwatchlogs.DeleteAccountPolicyInput) (*request.Request, *cloudwatchlogs.DeleteAccountPolicyOutput) + + DeleteDataProtectionPolicy(*cloudwatchlogs.DeleteDataProtectionPolicyInput) 
(*cloudwatchlogs.DeleteDataProtectionPolicyOutput, error) + DeleteDataProtectionPolicyWithContext(aws.Context, *cloudwatchlogs.DeleteDataProtectionPolicyInput, ...request.Option) (*cloudwatchlogs.DeleteDataProtectionPolicyOutput, error) + DeleteDataProtectionPolicyRequest(*cloudwatchlogs.DeleteDataProtectionPolicyInput) (*request.Request, *cloudwatchlogs.DeleteDataProtectionPolicyOutput) + + DeleteDelivery(*cloudwatchlogs.DeleteDeliveryInput) (*cloudwatchlogs.DeleteDeliveryOutput, error) + DeleteDeliveryWithContext(aws.Context, *cloudwatchlogs.DeleteDeliveryInput, ...request.Option) (*cloudwatchlogs.DeleteDeliveryOutput, error) + DeleteDeliveryRequest(*cloudwatchlogs.DeleteDeliveryInput) (*request.Request, *cloudwatchlogs.DeleteDeliveryOutput) + + DeleteDeliveryDestination(*cloudwatchlogs.DeleteDeliveryDestinationInput) (*cloudwatchlogs.DeleteDeliveryDestinationOutput, error) + DeleteDeliveryDestinationWithContext(aws.Context, *cloudwatchlogs.DeleteDeliveryDestinationInput, ...request.Option) (*cloudwatchlogs.DeleteDeliveryDestinationOutput, error) + DeleteDeliveryDestinationRequest(*cloudwatchlogs.DeleteDeliveryDestinationInput) (*request.Request, *cloudwatchlogs.DeleteDeliveryDestinationOutput) + + DeleteDeliveryDestinationPolicy(*cloudwatchlogs.DeleteDeliveryDestinationPolicyInput) (*cloudwatchlogs.DeleteDeliveryDestinationPolicyOutput, error) + DeleteDeliveryDestinationPolicyWithContext(aws.Context, *cloudwatchlogs.DeleteDeliveryDestinationPolicyInput, ...request.Option) (*cloudwatchlogs.DeleteDeliveryDestinationPolicyOutput, error) + DeleteDeliveryDestinationPolicyRequest(*cloudwatchlogs.DeleteDeliveryDestinationPolicyInput) (*request.Request, *cloudwatchlogs.DeleteDeliveryDestinationPolicyOutput) + + DeleteDeliverySource(*cloudwatchlogs.DeleteDeliverySourceInput) (*cloudwatchlogs.DeleteDeliverySourceOutput, error) + DeleteDeliverySourceWithContext(aws.Context, *cloudwatchlogs.DeleteDeliverySourceInput, ...request.Option) 
(*cloudwatchlogs.DeleteDeliverySourceOutput, error) + DeleteDeliverySourceRequest(*cloudwatchlogs.DeleteDeliverySourceInput) (*request.Request, *cloudwatchlogs.DeleteDeliverySourceOutput) + + DeleteDestination(*cloudwatchlogs.DeleteDestinationInput) (*cloudwatchlogs.DeleteDestinationOutput, error) + DeleteDestinationWithContext(aws.Context, *cloudwatchlogs.DeleteDestinationInput, ...request.Option) (*cloudwatchlogs.DeleteDestinationOutput, error) + DeleteDestinationRequest(*cloudwatchlogs.DeleteDestinationInput) (*request.Request, *cloudwatchlogs.DeleteDestinationOutput) + + DeleteLogAnomalyDetector(*cloudwatchlogs.DeleteLogAnomalyDetectorInput) (*cloudwatchlogs.DeleteLogAnomalyDetectorOutput, error) + DeleteLogAnomalyDetectorWithContext(aws.Context, *cloudwatchlogs.DeleteLogAnomalyDetectorInput, ...request.Option) (*cloudwatchlogs.DeleteLogAnomalyDetectorOutput, error) + DeleteLogAnomalyDetectorRequest(*cloudwatchlogs.DeleteLogAnomalyDetectorInput) (*request.Request, *cloudwatchlogs.DeleteLogAnomalyDetectorOutput) + + DeleteLogGroup(*cloudwatchlogs.DeleteLogGroupInput) (*cloudwatchlogs.DeleteLogGroupOutput, error) + DeleteLogGroupWithContext(aws.Context, *cloudwatchlogs.DeleteLogGroupInput, ...request.Option) (*cloudwatchlogs.DeleteLogGroupOutput, error) + DeleteLogGroupRequest(*cloudwatchlogs.DeleteLogGroupInput) (*request.Request, *cloudwatchlogs.DeleteLogGroupOutput) + + DeleteLogStream(*cloudwatchlogs.DeleteLogStreamInput) (*cloudwatchlogs.DeleteLogStreamOutput, error) + DeleteLogStreamWithContext(aws.Context, *cloudwatchlogs.DeleteLogStreamInput, ...request.Option) (*cloudwatchlogs.DeleteLogStreamOutput, error) + DeleteLogStreamRequest(*cloudwatchlogs.DeleteLogStreamInput) (*request.Request, *cloudwatchlogs.DeleteLogStreamOutput) + + DeleteMetricFilter(*cloudwatchlogs.DeleteMetricFilterInput) (*cloudwatchlogs.DeleteMetricFilterOutput, error) + DeleteMetricFilterWithContext(aws.Context, *cloudwatchlogs.DeleteMetricFilterInput, ...request.Option) 
(*cloudwatchlogs.DeleteMetricFilterOutput, error) + DeleteMetricFilterRequest(*cloudwatchlogs.DeleteMetricFilterInput) (*request.Request, *cloudwatchlogs.DeleteMetricFilterOutput) + + DeleteQueryDefinition(*cloudwatchlogs.DeleteQueryDefinitionInput) (*cloudwatchlogs.DeleteQueryDefinitionOutput, error) + DeleteQueryDefinitionWithContext(aws.Context, *cloudwatchlogs.DeleteQueryDefinitionInput, ...request.Option) (*cloudwatchlogs.DeleteQueryDefinitionOutput, error) + DeleteQueryDefinitionRequest(*cloudwatchlogs.DeleteQueryDefinitionInput) (*request.Request, *cloudwatchlogs.DeleteQueryDefinitionOutput) + + DeleteResourcePolicy(*cloudwatchlogs.DeleteResourcePolicyInput) (*cloudwatchlogs.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyWithContext(aws.Context, *cloudwatchlogs.DeleteResourcePolicyInput, ...request.Option) (*cloudwatchlogs.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyRequest(*cloudwatchlogs.DeleteResourcePolicyInput) (*request.Request, *cloudwatchlogs.DeleteResourcePolicyOutput) + + DeleteRetentionPolicy(*cloudwatchlogs.DeleteRetentionPolicyInput) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) + DeleteRetentionPolicyWithContext(aws.Context, *cloudwatchlogs.DeleteRetentionPolicyInput, ...request.Option) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) + DeleteRetentionPolicyRequest(*cloudwatchlogs.DeleteRetentionPolicyInput) (*request.Request, *cloudwatchlogs.DeleteRetentionPolicyOutput) + + DeleteSubscriptionFilter(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) + DeleteSubscriptionFilterWithContext(aws.Context, *cloudwatchlogs.DeleteSubscriptionFilterInput, ...request.Option) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) + DeleteSubscriptionFilterRequest(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.DeleteSubscriptionFilterOutput) + + DescribeAccountPolicies(*cloudwatchlogs.DescribeAccountPoliciesInput) 
(*cloudwatchlogs.DescribeAccountPoliciesOutput, error) + DescribeAccountPoliciesWithContext(aws.Context, *cloudwatchlogs.DescribeAccountPoliciesInput, ...request.Option) (*cloudwatchlogs.DescribeAccountPoliciesOutput, error) + DescribeAccountPoliciesRequest(*cloudwatchlogs.DescribeAccountPoliciesInput) (*request.Request, *cloudwatchlogs.DescribeAccountPoliciesOutput) + + DescribeDeliveries(*cloudwatchlogs.DescribeDeliveriesInput) (*cloudwatchlogs.DescribeDeliveriesOutput, error) + DescribeDeliveriesWithContext(aws.Context, *cloudwatchlogs.DescribeDeliveriesInput, ...request.Option) (*cloudwatchlogs.DescribeDeliveriesOutput, error) + DescribeDeliveriesRequest(*cloudwatchlogs.DescribeDeliveriesInput) (*request.Request, *cloudwatchlogs.DescribeDeliveriesOutput) + + DescribeDeliveriesPages(*cloudwatchlogs.DescribeDeliveriesInput, func(*cloudwatchlogs.DescribeDeliveriesOutput, bool) bool) error + DescribeDeliveriesPagesWithContext(aws.Context, *cloudwatchlogs.DescribeDeliveriesInput, func(*cloudwatchlogs.DescribeDeliveriesOutput, bool) bool, ...request.Option) error + + DescribeDeliveryDestinations(*cloudwatchlogs.DescribeDeliveryDestinationsInput) (*cloudwatchlogs.DescribeDeliveryDestinationsOutput, error) + DescribeDeliveryDestinationsWithContext(aws.Context, *cloudwatchlogs.DescribeDeliveryDestinationsInput, ...request.Option) (*cloudwatchlogs.DescribeDeliveryDestinationsOutput, error) + DescribeDeliveryDestinationsRequest(*cloudwatchlogs.DescribeDeliveryDestinationsInput) (*request.Request, *cloudwatchlogs.DescribeDeliveryDestinationsOutput) + + DescribeDeliveryDestinationsPages(*cloudwatchlogs.DescribeDeliveryDestinationsInput, func(*cloudwatchlogs.DescribeDeliveryDestinationsOutput, bool) bool) error + DescribeDeliveryDestinationsPagesWithContext(aws.Context, *cloudwatchlogs.DescribeDeliveryDestinationsInput, func(*cloudwatchlogs.DescribeDeliveryDestinationsOutput, bool) bool, ...request.Option) error + + 
DescribeDeliverySources(*cloudwatchlogs.DescribeDeliverySourcesInput) (*cloudwatchlogs.DescribeDeliverySourcesOutput, error) + DescribeDeliverySourcesWithContext(aws.Context, *cloudwatchlogs.DescribeDeliverySourcesInput, ...request.Option) (*cloudwatchlogs.DescribeDeliverySourcesOutput, error) + DescribeDeliverySourcesRequest(*cloudwatchlogs.DescribeDeliverySourcesInput) (*request.Request, *cloudwatchlogs.DescribeDeliverySourcesOutput) + + DescribeDeliverySourcesPages(*cloudwatchlogs.DescribeDeliverySourcesInput, func(*cloudwatchlogs.DescribeDeliverySourcesOutput, bool) bool) error + DescribeDeliverySourcesPagesWithContext(aws.Context, *cloudwatchlogs.DescribeDeliverySourcesInput, func(*cloudwatchlogs.DescribeDeliverySourcesOutput, bool) bool, ...request.Option) error + + DescribeDestinations(*cloudwatchlogs.DescribeDestinationsInput) (*cloudwatchlogs.DescribeDestinationsOutput, error) + DescribeDestinationsWithContext(aws.Context, *cloudwatchlogs.DescribeDestinationsInput, ...request.Option) (*cloudwatchlogs.DescribeDestinationsOutput, error) + DescribeDestinationsRequest(*cloudwatchlogs.DescribeDestinationsInput) (*request.Request, *cloudwatchlogs.DescribeDestinationsOutput) + + DescribeDestinationsPages(*cloudwatchlogs.DescribeDestinationsInput, func(*cloudwatchlogs.DescribeDestinationsOutput, bool) bool) error + DescribeDestinationsPagesWithContext(aws.Context, *cloudwatchlogs.DescribeDestinationsInput, func(*cloudwatchlogs.DescribeDestinationsOutput, bool) bool, ...request.Option) error + + DescribeExportTasks(*cloudwatchlogs.DescribeExportTasksInput) (*cloudwatchlogs.DescribeExportTasksOutput, error) + DescribeExportTasksWithContext(aws.Context, *cloudwatchlogs.DescribeExportTasksInput, ...request.Option) (*cloudwatchlogs.DescribeExportTasksOutput, error) + DescribeExportTasksRequest(*cloudwatchlogs.DescribeExportTasksInput) (*request.Request, *cloudwatchlogs.DescribeExportTasksOutput) + + DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) 
(*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogGroupsWithContext(aws.Context, *cloudwatchlogs.DescribeLogGroupsInput, ...request.Option) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogGroupsRequest(*cloudwatchlogs.DescribeLogGroupsInput) (*request.Request, *cloudwatchlogs.DescribeLogGroupsOutput) + + DescribeLogGroupsPages(*cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool) error + DescribeLogGroupsPagesWithContext(aws.Context, *cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool, ...request.Option) error + + DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + DescribeLogStreamsWithContext(aws.Context, *cloudwatchlogs.DescribeLogStreamsInput, ...request.Option) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + DescribeLogStreamsRequest(*cloudwatchlogs.DescribeLogStreamsInput) (*request.Request, *cloudwatchlogs.DescribeLogStreamsOutput) + + DescribeLogStreamsPages(*cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool) error + DescribeLogStreamsPagesWithContext(aws.Context, *cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool, ...request.Option) error + + DescribeMetricFilters(*cloudwatchlogs.DescribeMetricFiltersInput) (*cloudwatchlogs.DescribeMetricFiltersOutput, error) + DescribeMetricFiltersWithContext(aws.Context, *cloudwatchlogs.DescribeMetricFiltersInput, ...request.Option) (*cloudwatchlogs.DescribeMetricFiltersOutput, error) + DescribeMetricFiltersRequest(*cloudwatchlogs.DescribeMetricFiltersInput) (*request.Request, *cloudwatchlogs.DescribeMetricFiltersOutput) + + DescribeMetricFiltersPages(*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool) error + DescribeMetricFiltersPagesWithContext(aws.Context, 
*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool, ...request.Option) error + + DescribeQueries(*cloudwatchlogs.DescribeQueriesInput) (*cloudwatchlogs.DescribeQueriesOutput, error) + DescribeQueriesWithContext(aws.Context, *cloudwatchlogs.DescribeQueriesInput, ...request.Option) (*cloudwatchlogs.DescribeQueriesOutput, error) + DescribeQueriesRequest(*cloudwatchlogs.DescribeQueriesInput) (*request.Request, *cloudwatchlogs.DescribeQueriesOutput) + + DescribeQueryDefinitions(*cloudwatchlogs.DescribeQueryDefinitionsInput) (*cloudwatchlogs.DescribeQueryDefinitionsOutput, error) + DescribeQueryDefinitionsWithContext(aws.Context, *cloudwatchlogs.DescribeQueryDefinitionsInput, ...request.Option) (*cloudwatchlogs.DescribeQueryDefinitionsOutput, error) + DescribeQueryDefinitionsRequest(*cloudwatchlogs.DescribeQueryDefinitionsInput) (*request.Request, *cloudwatchlogs.DescribeQueryDefinitionsOutput) + + DescribeResourcePolicies(*cloudwatchlogs.DescribeResourcePoliciesInput) (*cloudwatchlogs.DescribeResourcePoliciesOutput, error) + DescribeResourcePoliciesWithContext(aws.Context, *cloudwatchlogs.DescribeResourcePoliciesInput, ...request.Option) (*cloudwatchlogs.DescribeResourcePoliciesOutput, error) + DescribeResourcePoliciesRequest(*cloudwatchlogs.DescribeResourcePoliciesInput) (*request.Request, *cloudwatchlogs.DescribeResourcePoliciesOutput) + + DescribeSubscriptionFilters(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) + DescribeSubscriptionFiltersWithContext(aws.Context, *cloudwatchlogs.DescribeSubscriptionFiltersInput, ...request.Option) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) + DescribeSubscriptionFiltersRequest(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*request.Request, *cloudwatchlogs.DescribeSubscriptionFiltersOutput) + + DescribeSubscriptionFiltersPages(*cloudwatchlogs.DescribeSubscriptionFiltersInput, 
func(*cloudwatchlogs.DescribeSubscriptionFiltersOutput, bool) bool) error + DescribeSubscriptionFiltersPagesWithContext(aws.Context, *cloudwatchlogs.DescribeSubscriptionFiltersInput, func(*cloudwatchlogs.DescribeSubscriptionFiltersOutput, bool) bool, ...request.Option) error + + DisassociateKmsKey(*cloudwatchlogs.DisassociateKmsKeyInput) (*cloudwatchlogs.DisassociateKmsKeyOutput, error) + DisassociateKmsKeyWithContext(aws.Context, *cloudwatchlogs.DisassociateKmsKeyInput, ...request.Option) (*cloudwatchlogs.DisassociateKmsKeyOutput, error) + DisassociateKmsKeyRequest(*cloudwatchlogs.DisassociateKmsKeyInput) (*request.Request, *cloudwatchlogs.DisassociateKmsKeyOutput) + + FilterLogEvents(*cloudwatchlogs.FilterLogEventsInput) (*cloudwatchlogs.FilterLogEventsOutput, error) + FilterLogEventsWithContext(aws.Context, *cloudwatchlogs.FilterLogEventsInput, ...request.Option) (*cloudwatchlogs.FilterLogEventsOutput, error) + FilterLogEventsRequest(*cloudwatchlogs.FilterLogEventsInput) (*request.Request, *cloudwatchlogs.FilterLogEventsOutput) + + FilterLogEventsPages(*cloudwatchlogs.FilterLogEventsInput, func(*cloudwatchlogs.FilterLogEventsOutput, bool) bool) error + FilterLogEventsPagesWithContext(aws.Context, *cloudwatchlogs.FilterLogEventsInput, func(*cloudwatchlogs.FilterLogEventsOutput, bool) bool, ...request.Option) error + + GetDataProtectionPolicy(*cloudwatchlogs.GetDataProtectionPolicyInput) (*cloudwatchlogs.GetDataProtectionPolicyOutput, error) + GetDataProtectionPolicyWithContext(aws.Context, *cloudwatchlogs.GetDataProtectionPolicyInput, ...request.Option) (*cloudwatchlogs.GetDataProtectionPolicyOutput, error) + GetDataProtectionPolicyRequest(*cloudwatchlogs.GetDataProtectionPolicyInput) (*request.Request, *cloudwatchlogs.GetDataProtectionPolicyOutput) + + GetDelivery(*cloudwatchlogs.GetDeliveryInput) (*cloudwatchlogs.GetDeliveryOutput, error) + GetDeliveryWithContext(aws.Context, *cloudwatchlogs.GetDeliveryInput, ...request.Option) 
(*cloudwatchlogs.GetDeliveryOutput, error) + GetDeliveryRequest(*cloudwatchlogs.GetDeliveryInput) (*request.Request, *cloudwatchlogs.GetDeliveryOutput) + + GetDeliveryDestination(*cloudwatchlogs.GetDeliveryDestinationInput) (*cloudwatchlogs.GetDeliveryDestinationOutput, error) + GetDeliveryDestinationWithContext(aws.Context, *cloudwatchlogs.GetDeliveryDestinationInput, ...request.Option) (*cloudwatchlogs.GetDeliveryDestinationOutput, error) + GetDeliveryDestinationRequest(*cloudwatchlogs.GetDeliveryDestinationInput) (*request.Request, *cloudwatchlogs.GetDeliveryDestinationOutput) + + GetDeliveryDestinationPolicy(*cloudwatchlogs.GetDeliveryDestinationPolicyInput) (*cloudwatchlogs.GetDeliveryDestinationPolicyOutput, error) + GetDeliveryDestinationPolicyWithContext(aws.Context, *cloudwatchlogs.GetDeliveryDestinationPolicyInput, ...request.Option) (*cloudwatchlogs.GetDeliveryDestinationPolicyOutput, error) + GetDeliveryDestinationPolicyRequest(*cloudwatchlogs.GetDeliveryDestinationPolicyInput) (*request.Request, *cloudwatchlogs.GetDeliveryDestinationPolicyOutput) + + GetDeliverySource(*cloudwatchlogs.GetDeliverySourceInput) (*cloudwatchlogs.GetDeliverySourceOutput, error) + GetDeliverySourceWithContext(aws.Context, *cloudwatchlogs.GetDeliverySourceInput, ...request.Option) (*cloudwatchlogs.GetDeliverySourceOutput, error) + GetDeliverySourceRequest(*cloudwatchlogs.GetDeliverySourceInput) (*request.Request, *cloudwatchlogs.GetDeliverySourceOutput) + + GetLogAnomalyDetector(*cloudwatchlogs.GetLogAnomalyDetectorInput) (*cloudwatchlogs.GetLogAnomalyDetectorOutput, error) + GetLogAnomalyDetectorWithContext(aws.Context, *cloudwatchlogs.GetLogAnomalyDetectorInput, ...request.Option) (*cloudwatchlogs.GetLogAnomalyDetectorOutput, error) + GetLogAnomalyDetectorRequest(*cloudwatchlogs.GetLogAnomalyDetectorInput) (*request.Request, *cloudwatchlogs.GetLogAnomalyDetectorOutput) + + GetLogEvents(*cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) + 
GetLogEventsWithContext(aws.Context, *cloudwatchlogs.GetLogEventsInput, ...request.Option) (*cloudwatchlogs.GetLogEventsOutput, error) + GetLogEventsRequest(*cloudwatchlogs.GetLogEventsInput) (*request.Request, *cloudwatchlogs.GetLogEventsOutput) + + GetLogEventsPages(*cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool) error + GetLogEventsPagesWithContext(aws.Context, *cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool, ...request.Option) error + + GetLogGroupFields(*cloudwatchlogs.GetLogGroupFieldsInput) (*cloudwatchlogs.GetLogGroupFieldsOutput, error) + GetLogGroupFieldsWithContext(aws.Context, *cloudwatchlogs.GetLogGroupFieldsInput, ...request.Option) (*cloudwatchlogs.GetLogGroupFieldsOutput, error) + GetLogGroupFieldsRequest(*cloudwatchlogs.GetLogGroupFieldsInput) (*request.Request, *cloudwatchlogs.GetLogGroupFieldsOutput) + + GetLogRecord(*cloudwatchlogs.GetLogRecordInput) (*cloudwatchlogs.GetLogRecordOutput, error) + GetLogRecordWithContext(aws.Context, *cloudwatchlogs.GetLogRecordInput, ...request.Option) (*cloudwatchlogs.GetLogRecordOutput, error) + GetLogRecordRequest(*cloudwatchlogs.GetLogRecordInput) (*request.Request, *cloudwatchlogs.GetLogRecordOutput) + + GetQueryResults(*cloudwatchlogs.GetQueryResultsInput) (*cloudwatchlogs.GetQueryResultsOutput, error) + GetQueryResultsWithContext(aws.Context, *cloudwatchlogs.GetQueryResultsInput, ...request.Option) (*cloudwatchlogs.GetQueryResultsOutput, error) + GetQueryResultsRequest(*cloudwatchlogs.GetQueryResultsInput) (*request.Request, *cloudwatchlogs.GetQueryResultsOutput) + + ListAnomalies(*cloudwatchlogs.ListAnomaliesInput) (*cloudwatchlogs.ListAnomaliesOutput, error) + ListAnomaliesWithContext(aws.Context, *cloudwatchlogs.ListAnomaliesInput, ...request.Option) (*cloudwatchlogs.ListAnomaliesOutput, error) + ListAnomaliesRequest(*cloudwatchlogs.ListAnomaliesInput) (*request.Request, *cloudwatchlogs.ListAnomaliesOutput) + + 
ListAnomaliesPages(*cloudwatchlogs.ListAnomaliesInput, func(*cloudwatchlogs.ListAnomaliesOutput, bool) bool) error + ListAnomaliesPagesWithContext(aws.Context, *cloudwatchlogs.ListAnomaliesInput, func(*cloudwatchlogs.ListAnomaliesOutput, bool) bool, ...request.Option) error + + ListLogAnomalyDetectors(*cloudwatchlogs.ListLogAnomalyDetectorsInput) (*cloudwatchlogs.ListLogAnomalyDetectorsOutput, error) + ListLogAnomalyDetectorsWithContext(aws.Context, *cloudwatchlogs.ListLogAnomalyDetectorsInput, ...request.Option) (*cloudwatchlogs.ListLogAnomalyDetectorsOutput, error) + ListLogAnomalyDetectorsRequest(*cloudwatchlogs.ListLogAnomalyDetectorsInput) (*request.Request, *cloudwatchlogs.ListLogAnomalyDetectorsOutput) + + ListLogAnomalyDetectorsPages(*cloudwatchlogs.ListLogAnomalyDetectorsInput, func(*cloudwatchlogs.ListLogAnomalyDetectorsOutput, bool) bool) error + ListLogAnomalyDetectorsPagesWithContext(aws.Context, *cloudwatchlogs.ListLogAnomalyDetectorsInput, func(*cloudwatchlogs.ListLogAnomalyDetectorsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*cloudwatchlogs.ListTagsForResourceInput) (*cloudwatchlogs.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *cloudwatchlogs.ListTagsForResourceInput, ...request.Option) (*cloudwatchlogs.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*cloudwatchlogs.ListTagsForResourceInput) (*request.Request, *cloudwatchlogs.ListTagsForResourceOutput) + + ListTagsLogGroup(*cloudwatchlogs.ListTagsLogGroupInput) (*cloudwatchlogs.ListTagsLogGroupOutput, error) + ListTagsLogGroupWithContext(aws.Context, *cloudwatchlogs.ListTagsLogGroupInput, ...request.Option) (*cloudwatchlogs.ListTagsLogGroupOutput, error) + ListTagsLogGroupRequest(*cloudwatchlogs.ListTagsLogGroupInput) (*request.Request, *cloudwatchlogs.ListTagsLogGroupOutput) + + PutAccountPolicy(*cloudwatchlogs.PutAccountPolicyInput) (*cloudwatchlogs.PutAccountPolicyOutput, error) + PutAccountPolicyWithContext(aws.Context, 
*cloudwatchlogs.PutAccountPolicyInput, ...request.Option) (*cloudwatchlogs.PutAccountPolicyOutput, error) + PutAccountPolicyRequest(*cloudwatchlogs.PutAccountPolicyInput) (*request.Request, *cloudwatchlogs.PutAccountPolicyOutput) + + PutDataProtectionPolicy(*cloudwatchlogs.PutDataProtectionPolicyInput) (*cloudwatchlogs.PutDataProtectionPolicyOutput, error) + PutDataProtectionPolicyWithContext(aws.Context, *cloudwatchlogs.PutDataProtectionPolicyInput, ...request.Option) (*cloudwatchlogs.PutDataProtectionPolicyOutput, error) + PutDataProtectionPolicyRequest(*cloudwatchlogs.PutDataProtectionPolicyInput) (*request.Request, *cloudwatchlogs.PutDataProtectionPolicyOutput) + + PutDeliveryDestination(*cloudwatchlogs.PutDeliveryDestinationInput) (*cloudwatchlogs.PutDeliveryDestinationOutput, error) + PutDeliveryDestinationWithContext(aws.Context, *cloudwatchlogs.PutDeliveryDestinationInput, ...request.Option) (*cloudwatchlogs.PutDeliveryDestinationOutput, error) + PutDeliveryDestinationRequest(*cloudwatchlogs.PutDeliveryDestinationInput) (*request.Request, *cloudwatchlogs.PutDeliveryDestinationOutput) + + PutDeliveryDestinationPolicy(*cloudwatchlogs.PutDeliveryDestinationPolicyInput) (*cloudwatchlogs.PutDeliveryDestinationPolicyOutput, error) + PutDeliveryDestinationPolicyWithContext(aws.Context, *cloudwatchlogs.PutDeliveryDestinationPolicyInput, ...request.Option) (*cloudwatchlogs.PutDeliveryDestinationPolicyOutput, error) + PutDeliveryDestinationPolicyRequest(*cloudwatchlogs.PutDeliveryDestinationPolicyInput) (*request.Request, *cloudwatchlogs.PutDeliveryDestinationPolicyOutput) + + PutDeliverySource(*cloudwatchlogs.PutDeliverySourceInput) (*cloudwatchlogs.PutDeliverySourceOutput, error) + PutDeliverySourceWithContext(aws.Context, *cloudwatchlogs.PutDeliverySourceInput, ...request.Option) (*cloudwatchlogs.PutDeliverySourceOutput, error) + PutDeliverySourceRequest(*cloudwatchlogs.PutDeliverySourceInput) (*request.Request, *cloudwatchlogs.PutDeliverySourceOutput) + + 
PutDestination(*cloudwatchlogs.PutDestinationInput) (*cloudwatchlogs.PutDestinationOutput, error) + PutDestinationWithContext(aws.Context, *cloudwatchlogs.PutDestinationInput, ...request.Option) (*cloudwatchlogs.PutDestinationOutput, error) + PutDestinationRequest(*cloudwatchlogs.PutDestinationInput) (*request.Request, *cloudwatchlogs.PutDestinationOutput) + + PutDestinationPolicy(*cloudwatchlogs.PutDestinationPolicyInput) (*cloudwatchlogs.PutDestinationPolicyOutput, error) + PutDestinationPolicyWithContext(aws.Context, *cloudwatchlogs.PutDestinationPolicyInput, ...request.Option) (*cloudwatchlogs.PutDestinationPolicyOutput, error) + PutDestinationPolicyRequest(*cloudwatchlogs.PutDestinationPolicyInput) (*request.Request, *cloudwatchlogs.PutDestinationPolicyOutput) + + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + PutLogEventsWithContext(aws.Context, *cloudwatchlogs.PutLogEventsInput, ...request.Option) (*cloudwatchlogs.PutLogEventsOutput, error) + PutLogEventsRequest(*cloudwatchlogs.PutLogEventsInput) (*request.Request, *cloudwatchlogs.PutLogEventsOutput) + + PutMetricFilter(*cloudwatchlogs.PutMetricFilterInput) (*cloudwatchlogs.PutMetricFilterOutput, error) + PutMetricFilterWithContext(aws.Context, *cloudwatchlogs.PutMetricFilterInput, ...request.Option) (*cloudwatchlogs.PutMetricFilterOutput, error) + PutMetricFilterRequest(*cloudwatchlogs.PutMetricFilterInput) (*request.Request, *cloudwatchlogs.PutMetricFilterOutput) + + PutQueryDefinition(*cloudwatchlogs.PutQueryDefinitionInput) (*cloudwatchlogs.PutQueryDefinitionOutput, error) + PutQueryDefinitionWithContext(aws.Context, *cloudwatchlogs.PutQueryDefinitionInput, ...request.Option) (*cloudwatchlogs.PutQueryDefinitionOutput, error) + PutQueryDefinitionRequest(*cloudwatchlogs.PutQueryDefinitionInput) (*request.Request, *cloudwatchlogs.PutQueryDefinitionOutput) + + PutResourcePolicy(*cloudwatchlogs.PutResourcePolicyInput) (*cloudwatchlogs.PutResourcePolicyOutput, 
error) + PutResourcePolicyWithContext(aws.Context, *cloudwatchlogs.PutResourcePolicyInput, ...request.Option) (*cloudwatchlogs.PutResourcePolicyOutput, error) + PutResourcePolicyRequest(*cloudwatchlogs.PutResourcePolicyInput) (*request.Request, *cloudwatchlogs.PutResourcePolicyOutput) + + PutRetentionPolicy(*cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + PutRetentionPolicyWithContext(aws.Context, *cloudwatchlogs.PutRetentionPolicyInput, ...request.Option) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + PutRetentionPolicyRequest(*cloudwatchlogs.PutRetentionPolicyInput) (*request.Request, *cloudwatchlogs.PutRetentionPolicyOutput) + + PutSubscriptionFilter(*cloudwatchlogs.PutSubscriptionFilterInput) (*cloudwatchlogs.PutSubscriptionFilterOutput, error) + PutSubscriptionFilterWithContext(aws.Context, *cloudwatchlogs.PutSubscriptionFilterInput, ...request.Option) (*cloudwatchlogs.PutSubscriptionFilterOutput, error) + PutSubscriptionFilterRequest(*cloudwatchlogs.PutSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.PutSubscriptionFilterOutput) + + StartLiveTail(*cloudwatchlogs.StartLiveTailInput) (*cloudwatchlogs.StartLiveTailOutput, error) + StartLiveTailWithContext(aws.Context, *cloudwatchlogs.StartLiveTailInput, ...request.Option) (*cloudwatchlogs.StartLiveTailOutput, error) + StartLiveTailRequest(*cloudwatchlogs.StartLiveTailInput) (*request.Request, *cloudwatchlogs.StartLiveTailOutput) + + StartQuery(*cloudwatchlogs.StartQueryInput) (*cloudwatchlogs.StartQueryOutput, error) + StartQueryWithContext(aws.Context, *cloudwatchlogs.StartQueryInput, ...request.Option) (*cloudwatchlogs.StartQueryOutput, error) + StartQueryRequest(*cloudwatchlogs.StartQueryInput) (*request.Request, *cloudwatchlogs.StartQueryOutput) + + StopQuery(*cloudwatchlogs.StopQueryInput) (*cloudwatchlogs.StopQueryOutput, error) + StopQueryWithContext(aws.Context, *cloudwatchlogs.StopQueryInput, ...request.Option) (*cloudwatchlogs.StopQueryOutput, 
error) + StopQueryRequest(*cloudwatchlogs.StopQueryInput) (*request.Request, *cloudwatchlogs.StopQueryOutput) + + TagLogGroup(*cloudwatchlogs.TagLogGroupInput) (*cloudwatchlogs.TagLogGroupOutput, error) + TagLogGroupWithContext(aws.Context, *cloudwatchlogs.TagLogGroupInput, ...request.Option) (*cloudwatchlogs.TagLogGroupOutput, error) + TagLogGroupRequest(*cloudwatchlogs.TagLogGroupInput) (*request.Request, *cloudwatchlogs.TagLogGroupOutput) + + TagResource(*cloudwatchlogs.TagResourceInput) (*cloudwatchlogs.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *cloudwatchlogs.TagResourceInput, ...request.Option) (*cloudwatchlogs.TagResourceOutput, error) + TagResourceRequest(*cloudwatchlogs.TagResourceInput) (*request.Request, *cloudwatchlogs.TagResourceOutput) + + TestMetricFilter(*cloudwatchlogs.TestMetricFilterInput) (*cloudwatchlogs.TestMetricFilterOutput, error) + TestMetricFilterWithContext(aws.Context, *cloudwatchlogs.TestMetricFilterInput, ...request.Option) (*cloudwatchlogs.TestMetricFilterOutput, error) + TestMetricFilterRequest(*cloudwatchlogs.TestMetricFilterInput) (*request.Request, *cloudwatchlogs.TestMetricFilterOutput) + + UntagLogGroup(*cloudwatchlogs.UntagLogGroupInput) (*cloudwatchlogs.UntagLogGroupOutput, error) + UntagLogGroupWithContext(aws.Context, *cloudwatchlogs.UntagLogGroupInput, ...request.Option) (*cloudwatchlogs.UntagLogGroupOutput, error) + UntagLogGroupRequest(*cloudwatchlogs.UntagLogGroupInput) (*request.Request, *cloudwatchlogs.UntagLogGroupOutput) + + UntagResource(*cloudwatchlogs.UntagResourceInput) (*cloudwatchlogs.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *cloudwatchlogs.UntagResourceInput, ...request.Option) (*cloudwatchlogs.UntagResourceOutput, error) + UntagResourceRequest(*cloudwatchlogs.UntagResourceInput) (*request.Request, *cloudwatchlogs.UntagResourceOutput) + + UpdateAnomaly(*cloudwatchlogs.UpdateAnomalyInput) (*cloudwatchlogs.UpdateAnomalyOutput, error) + 
UpdateAnomalyWithContext(aws.Context, *cloudwatchlogs.UpdateAnomalyInput, ...request.Option) (*cloudwatchlogs.UpdateAnomalyOutput, error) + UpdateAnomalyRequest(*cloudwatchlogs.UpdateAnomalyInput) (*request.Request, *cloudwatchlogs.UpdateAnomalyOutput) + + UpdateLogAnomalyDetector(*cloudwatchlogs.UpdateLogAnomalyDetectorInput) (*cloudwatchlogs.UpdateLogAnomalyDetectorOutput, error) + UpdateLogAnomalyDetectorWithContext(aws.Context, *cloudwatchlogs.UpdateLogAnomalyDetectorInput, ...request.Option) (*cloudwatchlogs.UpdateLogAnomalyDetectorOutput, error) + UpdateLogAnomalyDetectorRequest(*cloudwatchlogs.UpdateLogAnomalyDetectorInput) (*request.Request, *cloudwatchlogs.UpdateLogAnomalyDetectorOutput) +} + +var _ CloudWatchLogsAPI = (*cloudwatchlogs.CloudWatchLogs)(nil) diff --git a/sdk/service/cloudwatchlogs/doc.go b/sdk/service/cloudwatchlogs/doc.go new file mode 100644 index 0000000000..bd52e9d9f6 --- /dev/null +++ b/sdk/service/cloudwatchlogs/doc.go @@ -0,0 +1,57 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package cloudwatchlogs provides the client and types for making API +// requests to Amazon CloudWatch Logs. +// +// You can use Amazon CloudWatch Logs to monitor, store, and access your log +// files from EC2 instances, CloudTrail, and other sources. You can then retrieve +// the associated log data from CloudWatch Logs using the CloudWatch console. +// Alternatively, you can use CloudWatch Logs commands in the Amazon Web Services +// CLI, CloudWatch Logs API, or CloudWatch Logs SDK. +// +// You can use CloudWatch Logs to: +// +// - Monitor logs from EC2 instances in real time: You can use CloudWatch +// Logs to monitor applications and systems using log data. For example, +// CloudWatch Logs can track the number of errors that occur in your application +// logs. Then, it can send you a notification whenever the rate of errors +// exceeds a threshold that you specify. 
CloudWatch Logs uses your log data +// for monitoring so no code changes are required. For example, you can monitor +// application logs for specific literal terms (such as "NullReferenceException"). +// You can also count the number of occurrences of a literal term at a particular +// position in log data (such as "404" status codes in an Apache access log). +// When the term you are searching for is found, CloudWatch Logs reports +// the data to a CloudWatch metric that you specify. +// +// - Monitor CloudTrail logged events: You can create alarms in CloudWatch +// and receive notifications of particular API activity as captured by CloudTrail. +// You can use the notification to perform troubleshooting. +// +// - Archive log data: You can use CloudWatch Logs to store your log data +// in highly durable storage. You can change the log retention setting so +// that any log events earlier than this setting are automatically deleted. +// The CloudWatch Logs agent helps to quickly send both rotated and non-rotated +// log data off of a host and into the log service. You can then access the +// raw log data when you need it. +// +// See https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28 for more information on this service. +// +// See cloudwatchlogs package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/ +// +// # Using the Client +// +// To contact Amazon CloudWatch Logs with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon CloudWatch Logs client CloudWatchLogs for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/#New +package cloudwatchlogs diff --git a/sdk/service/cloudwatchlogs/errors.go b/sdk/service/cloudwatchlogs/errors.go new file mode 100644 index 0000000000..4c4ff1b551 --- /dev/null +++ b/sdk/service/cloudwatchlogs/errors.go @@ -0,0 +1,159 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You don't have sufficient permissions to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // This operation attempted to create a resource that already exists. + ErrCodeConflictException = "ConflictException" + + // ErrCodeDataAlreadyAcceptedException for service response error code + // "DataAlreadyAcceptedException". + // + // The event was already logged. + // + // PutLogEvents actions are now always accepted and never return DataAlreadyAcceptedException + // regardless of whether a given batch of log events has already been accepted. + ErrCodeDataAlreadyAcceptedException = "DataAlreadyAcceptedException" + + // ErrCodeInvalidOperationException for service response error code + // "InvalidOperationException". + // + // The operation is not valid on the specified resource. + ErrCodeInvalidOperationException = "InvalidOperationException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // A parameter is specified incorrectly. 
+ ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeInvalidSequenceTokenException for service response error code + // "InvalidSequenceTokenException". + // + // The sequence token is not valid. You can get the correct sequence token in + // the expectedSequenceToken field in the InvalidSequenceTokenException message. + // + // PutLogEvents actions are now always accepted and never return InvalidSequenceTokenException + // regardless of receiving an invalid sequence token. + ErrCodeInvalidSequenceTokenException = "InvalidSequenceTokenException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // You have reached the maximum number of resources that can be created. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeMalformedQueryException for service response error code + // "MalformedQueryException". + // + // The query string is not valid. Details about this error are displayed in + // a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). + // + // For more information about valid query syntax, see CloudWatch Logs Insights + // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + ErrCodeMalformedQueryException = "MalformedQueryException" + + // ErrCodeOperationAbortedException for service response error code + // "OperationAbortedException". + // + // Multiple concurrent requests to update the same resource were in conflict. + ErrCodeOperationAbortedException = "OperationAbortedException" + + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // The specified resource already exists. 
+ ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The specified resource does not exist. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceQuotaExceededException for service response error code + // "ServiceQuotaExceededException". + // + // This request exceeds a service quota. + ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" + + // ErrCodeServiceUnavailableException for service response error code + // "ServiceUnavailableException". + // + // The service cannot complete the request. + ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeSessionStreamingException for service response error code + // "SessionStreamingException". + // + // his exception is returned if an unknown error occurs during a Live Tail session. + ErrCodeSessionStreamingException = "SessionStreamingException" + + // ErrCodeSessionTimeoutException for service response error code + // "SessionTimeoutException". + // + // This exception is returned in a Live Tail stream when the Live Tail session + // times out. Live Tail sessions time out after three hours. + ErrCodeSessionTimeoutException = "SessionTimeoutException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was throttled because of quota limits. + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // A resource can have no more than 50 tags. + ErrCodeTooManyTagsException = "TooManyTagsException" + + // ErrCodeUnrecognizedClientException for service response error code + // "UnrecognizedClientException". + // + // The most likely cause is an Amazon Web Services access key ID or secret key + // that's not valid. 
+ ErrCodeUnrecognizedClientException = "UnrecognizedClientException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // One of the parameters for the request is not valid. + ErrCodeValidationException = "ValidationException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "ConflictException": newErrorConflictException, + "DataAlreadyAcceptedException": newErrorDataAlreadyAcceptedException, + "InvalidOperationException": newErrorInvalidOperationException, + "InvalidParameterException": newErrorInvalidParameterException, + "InvalidSequenceTokenException": newErrorInvalidSequenceTokenException, + "LimitExceededException": newErrorLimitExceededException, + "MalformedQueryException": newErrorMalformedQueryException, + "OperationAbortedException": newErrorOperationAbortedException, + "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceQuotaExceededException": newErrorServiceQuotaExceededException, + "ServiceUnavailableException": newErrorServiceUnavailableException, + "SessionStreamingException": newErrorSessionStreamingException, + "SessionTimeoutException": newErrorSessionTimeoutException, + "ThrottlingException": newErrorThrottlingException, + "TooManyTagsException": newErrorTooManyTagsException, + "UnrecognizedClientException": newErrorUnrecognizedClientException, + "ValidationException": newErrorValidationException, +} diff --git a/sdk/service/cloudwatchlogs/integ_test.go b/sdk/service/cloudwatchlogs/integ_test.go new file mode 100644 index 0000000000..4fe858c27e --- /dev/null +++ b/sdk/service/cloudwatchlogs/integ_test.go @@ -0,0 +1,67 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +//go:build go1.16 && integration +// +build go1.16,integration + +package cloudwatchlogs_test + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +var _ aws.Config +var _ awserr.Error +var _ request.Request + +func TestInteg_00_DescribeLogGroups(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := cloudwatchlogs.New(sess) + params := &cloudwatchlogs.DescribeLogGroupsInput{} + _, err := svc.DescribeLogGroupsWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err != nil { + t.Errorf("expect no error, got %v", err) + } +} +func TestInteg_01_GetLogEvents(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := cloudwatchlogs.New(sess) + params := &cloudwatchlogs.GetLogEventsInput{ + LogGroupName: aws.String("fakegroup"), + LogStreamName: aws.String("fakestream"), + } + _, err := svc.GetLogEventsWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if len(aerr.Message()) == 0 { + t.Errorf("expect non-empty error message") + } + if v := aerr.Code(); v == request.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } +} diff --git a/sdk/service/cloudwatchlogs/service.go 
b/sdk/service/cloudwatchlogs/service.go new file mode 100644 index 0000000000..180b8da3b9 --- /dev/null +++ b/sdk/service/cloudwatchlogs/service.go @@ -0,0 +1,112 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// CloudWatchLogs provides the API operation methods for making requests to +// Amazon CloudWatch Logs. See this package's package overview docs +// for details on the service. +// +// CloudWatchLogs methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type CloudWatchLogs struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "logs" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "CloudWatch Logs" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the CloudWatchLogs client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a CloudWatchLogs client from just a session. 
+// svc := cloudwatchlogs.New(mySession) +// +// // Create a CloudWatchLogs client with additional configuration +// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *CloudWatchLogs { + svc := &CloudWatchLogs{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2014-03-28", + ResolvedRegion: resolvedRegion, + JSONVersion: "1.1", + TargetPrefix: "Logs_20140328", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + svc.Handlers.BuildStream.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(jsonrpc.UnmarshalHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchLogs operation and runs any +// custom request initialization. 
+func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} From 7ca943983d274340cd7f1934c9ec76f00491a521 Mon Sep 17 00:00:00 2001 From: Zhihong Lin Date: Tue, 20 Aug 2024 10:08:05 -0400 Subject: [PATCH 49/55] Merge remote-tracking branch 'upstream/main' into upstream-merge --- .github/workflows/build-test-artifacts.yml | 4 ++-- .github/workflows/integration-test.yml | 10 +--------- cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go | 1 - cmd/start-amazon-cloudwatch-agent/path_windows.go | 1 - extension/entitystore/config_test.go | 3 +-- go.mod | 2 ++ go.sum | 4 ++++ plugins/processors/awsentity/config_test.go | 3 +-- service/defaultcomponents/components_test.go | 8 ++++---- .../sampleConfig/appsignals_and_eks_config.yaml | 2 -- .../sampleConfig/appsignals_and_k8s_config.yaml | 2 -- .../appsignals_fallback_and_eks_config.yaml | 5 +++++ .../sampleConfig/appsignals_over_fallback_config.yaml | 5 +++++ .../sampleConfig/base_appsignals_config.yaml | 2 -- .../sampleConfig/base_appsignals_fallback_config.yaml | 2 ++ .../tocwconfig/sampleConfig/complete_linux_config.yaml | 4 +--- .../sampleConfig/complete_windows_config.yaml | 5 ----- .../tocwconfig/sampleConfig/jmx_config_linux.yaml | 4 ++++ .../tocwconfig/sampleConfig/trace_config_linux.yaml | 2 -- .../tocwconfig/sampleConfig/trace_config_windows.yaml | 4 ---- 20 files changed, 32 insertions(+), 41 deletions(-) diff --git a/.github/workflows/build-test-artifacts.yml b/.github/workflows/build-test-artifacts.yml index ca12788fc7..97062ef1fb 100644 --- a/.github/workflows/build-test-artifacts.yml +++ b/.github/workflows/build-test-artifacts.yml @@ -86,7 +86,7 @@ jobs: Bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} StartIntegrationTests: - needs: [ BuildAndUploadPackages, BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] 
+ needs: [ BuildAndUpload ] runs-on: ubuntu-latest steps: - run: gh workflow run integration-test.yml --ref ${{ github.ref_name }} --repo $GITHUB_REPOSITORY -f build_run_id=${{ github.run_id }} -f build_sha=${{ github.sha }} @@ -94,7 +94,7 @@ jobs: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} StartApplicationSignalsE2ETests: - needs: [ BuildAndUploadPackages, BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] + needs: [ BuildAndUpload ] # Workflow only runs against main if: ${{ contains(github.ref_name, 'main') }} runs-on: ubuntu-latest diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 193febe09f..0c308f7018 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -46,14 +46,6 @@ jobs: echo "Build SHA does not match test SHA" exit 1 fi - - run: | - conclusion=$(gh run view ${{ inputs.build_run_id }} --repo $GITHUB_REPOSITORY --json conclusion -q '.conclusion') - if [[ $conclusion == "success" ]]; then - echo "Run succeeded" - else - echo "Run failed" - exit 1 - fi env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -1346,7 +1338,7 @@ jobs: fi terraform destroy --auto-approve CompassLinuxIntegrationTest: - needs: [ BuildAndUpload ] + needs: [ GenerateTestMatrix ] name: 'CompassLinuxIntegrationTest' runs-on: ubuntu-latest permissions: diff --git a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go index 90fa7a2fa1..b538875596 100644 --- a/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go +++ b/cmd/amazon-cloudwatch-agent/amazon-cloudwatch-agent.go @@ -29,7 +29,6 @@ import ( "github.com/influxdata/wlog" "github.com/kardianos/service" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/otelcol" "go.uber.org/zap" diff --git a/cmd/start-amazon-cloudwatch-agent/path_windows.go b/cmd/start-amazon-cloudwatch-agent/path_windows.go index 87cdea8169..a253348f64 100644 
--- a/cmd/start-amazon-cloudwatch-agent/path_windows.go +++ b/cmd/start-amazon-cloudwatch-agent/path_windows.go @@ -15,7 +15,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/tool/paths" - "github.com/aws/amazon-cloudwatch-agent/translator/config" ) func startAgent(writer io.WriteCloser) error { diff --git a/extension/entitystore/config_test.go b/extension/entitystore/config_test.go index fc0576330f..fac7a6683f 100644 --- a/extension/entitystore/config_test.go +++ b/extension/entitystore/config_test.go @@ -7,13 +7,12 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" ) func TestUnmarshalDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.NoError(t, confmap.New().Unmarshal(cfg)) assert.Equal(t, factory.CreateDefaultConfig(), cfg) } diff --git a/go.mod b/go.mod index c61de167af..38cece21c1 100644 --- a/go.mod +++ b/go.mod @@ -399,9 +399,11 @@ require ( go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 // indirect go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 // indirect go.opentelemetry.io/collector/connector v0.103.0 // indirect + go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 // indirect go.opentelemetry.io/collector/extension/auth v0.103.0 // indirect go.opentelemetry.io/collector/featuregate v1.10.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.103.0 // indirect + go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect diff --git a/go.sum b/go.sum index 7be028f49a..52096524a4 100644 --- 
a/go.sum +++ b/go.sum @@ -1483,6 +1483,8 @@ go.opentelemetry.io/collector/exporter v0.103.0 h1:g0nF/FAwuA7tTJf5uo1PvlQl7xFqC go.opentelemetry.io/collector/exporter v0.103.0/go.mod h1:PC2OvciPEew2kaA/ZMyxRqfeOW8Wgi0CYR614PEyS/w= go.opentelemetry.io/collector/exporter/debugexporter v0.103.0 h1:jwZHoXvp3vdQ3obtnU+Vav5ChTCUBSC6mvlOZJ8doCU= go.opentelemetry.io/collector/exporter/debugexporter v0.103.0/go.mod h1:kzmBnKxsLNVBRGS8nwu497SvHspzyeiV06+LiPHktto= +go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 h1:QaxkFbHSSYj2RRgkIhB6lDjJHFSGr71WlLk46fG0mAo= +go.opentelemetry.io/collector/exporter/nopexporter v0.103.0/go.mod h1:/wopRTmGS20A2Ihxcuj8M4j4VWMG6AFwmrt0eT6rDNg= go.opentelemetry.io/collector/extension v0.103.0 h1:vTsd+GElvT7qKk9Y9d6UKuuT2Ngx0mai8Q48hkKQMwM= go.opentelemetry.io/collector/extension v0.103.0/go.mod h1:rp2l3xskNKWv0yBCyU69Pv34TnP1QVD1ijr0zSndnsM= go.opentelemetry.io/collector/extension/auth v0.103.0 h1:i7cQl+Ewpve/DIN4rFMg1GiyUPE14LZsYWrJ1RqtP84= @@ -1505,6 +1507,8 @@ go.opentelemetry.io/collector/processor/batchprocessor v0.103.0 h1:vunxXGq5Pzcaw go.opentelemetry.io/collector/processor/batchprocessor v0.103.0/go.mod h1:c5nh1LHVlBFQajCnm/5hwKqAvOLpTTOd2GQyB7lT75E= go.opentelemetry.io/collector/receiver v0.103.0 h1:V3JBKkX+7e/NYpDDZVyeu2VQB1/lLFuoJFPfupdCcZs= go.opentelemetry.io/collector/receiver v0.103.0/go.mod h1:Yybv4ynKFdMOYViWWPMmjkugR89FSQN0P37wP6mX6qM= +go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 h1:GgeYAKOaHWDm+8JVN63y/0elp1uTOF+XqDQfXWm2i1A= +go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0/go.mod h1:Hwoaia7m3+5qVtZyXb5/qSlFFfDP0Wd0F/2yKC/LFiw= go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0 h1:TycVVl4AWioV6kWeFcCIk2QuKfXOzn88yw989opsMdE= go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0/go.mod h1:jAbzL5lwOGG93YbcPZ6aFZIZq+tjYQ+BS3vKKT2nRgw= go.opentelemetry.io/collector/semconv v0.103.0 h1:5tlVoZlo9USHAU2Bz4YrEste0Vm5AMufXkYJhAVve1Q= diff --git 
a/plugins/processors/awsentity/config_test.go b/plugins/processors/awsentity/config_test.go index 0605b0f712..7e1b28997c 100644 --- a/plugins/processors/awsentity/config_test.go +++ b/plugins/processors/awsentity/config_test.go @@ -7,13 +7,12 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" ) func TestUnmarshalDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.NoError(t, component.UnmarshalConfig(confmap.New(), cfg)) + assert.NoError(t, confmap.New().Unmarshal(cfg)) assert.Equal(t, factory.CreateDefaultConfig(), cfg) } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 18c9603158..a80f202860 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -11,10 +11,10 @@ import ( ) const ( - receiversCount = 6 - processorCount = 10 - exportersCount = 5 - extensionsCount = 2 + receiversCount = 7 + processorCount = 11 + exportersCount = 6 + extensionsCount = 3 ) func TestComponents(t *testing.T) { diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 59bf7ce25e..323b25e108 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -627,8 +627,6 @@ receivers: dialer: timeout: 0s endpoint: 0.0.0.0:4315 - dialer: - timeout: "0s" include_metadata: false max_concurrent_streams: 0 max_recv_msg_size_mib: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index 891ad3cf68..92f78be8e3 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -627,8 
+627,6 @@ receivers: dialer: timeout: 0s endpoint: 0.0.0.0:4315 - dialer: - timeout: "0s" include_metadata: false max_concurrent_streams: 0 max_recv_msg_size_mib: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 9728e3a838..33b3645085 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -336,6 +336,7 @@ processors: enabled: true host.name: enabled: true + tags: [] compression: "" consul: address: "" @@ -496,6 +497,7 @@ processors: k8snode: auth_type: serviceAccount context: "" + kube_config_path: "" node_from_env_var: "" resource_attributes: k8s.node.name: @@ -602,7 +604,10 @@ receivers: container_orchestrator: eks enable_control_plane_metrics: false endpoint: "" + host_ip: "" + host_name: "" imds_retries: 1 + kube_config_path: "" leader_lock_name: cwagent-clusterleader leader_lock_using_config_map_only: true local_mode: false diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index a0d628b79a..f37a2c0683 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -336,6 +336,7 @@ processors: enabled: true host.name: enabled: true + tags: [] compression: "" consul: address: "" @@ -496,6 +497,7 @@ processors: k8snode: auth_type: serviceAccount context: "" + kube_config_path: "" node_from_env_var: "" resource_attributes: k8s.node.name: @@ -602,7 +604,10 @@ receivers: container_orchestrator: eks enable_control_plane_metrics: false endpoint: "" + host_ip: "" + host_name: "" imds_retries: 1 + kube_config_path: "" leader_lock_name: cwagent-clusterleader leader_lock_using_config_map_only: true local_mode: false diff --git 
a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index 8cda45c85f..ee33930115 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -258,8 +258,6 @@ processors: enabled: true aws.ecs.task.id: enabled: true - aws.ecs.task.id: - enabled: true aws.ecs.task.revision: enabled: true aws.log.group.arns: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index ce424e59dd..636aa701a2 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -194,6 +194,7 @@ processors: enabled: true host.name: enabled: true + tags: [] compression: "" consul: address: "" @@ -354,6 +355,7 @@ processors: k8snode: auth_type: serviceAccount context: "" + kube_config_path: "" node_from_env_var: "" resource_attributes: k8s.node.name: diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index ea302955fa..b4d9e42a32 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -195,9 +195,9 @@ processors: metric_statements: - context: metric statements: - - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" - set(unit, "unit") where name == "jvm.memory.heap.used" - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" + - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" trace_statements: [] transform/jmx/1: error_mode: propagate @@ -379,11 +379,9 @@ service: - telegraf_disk - telegraf_statsd - telegraf_swap - - telegraf_processes - telegraf_mem - telegraf_netstat - telegraf_processes - - 
telegraf_mem metrics/hostDeltaMetrics: exporters: - awscloudwatch diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 7af5ac86d0..74a98804b2 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -131,8 +131,6 @@ receivers: dialer: timeout: 0s endpoint: 0.0.0.0:2001 - dialer: - timeout: "0s" proxy_server: aws_endpoint: https://x-ray-endpoint.us-west-2.amazonaws.com certificate_file_path: "" @@ -144,7 +142,6 @@ receivers: profile: "" proxy_address: https://proxy.proxy.com region: us-west-2 - service_name: "xray" role_arn: trace_role_arn_value_test service_name: xray tls: @@ -165,8 +162,6 @@ receivers: dialer: timeout: 0s endpoint: 0.0.0.0:1111 - dialer: - timeout: "0s" include_metadata: false max_concurrent_streams: 0 max_recv_msg_size_mib: 0 diff --git a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml index 31e88dc697..1177a5de08 100644 --- a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml @@ -20,6 +20,9 @@ extensions: usage_flags: mode: EC2 region_type: ACJ + entitystore: + mode: ec2 + region: us-west-2 processors: cumulativetodelta/jmx: exclude: @@ -100,6 +103,7 @@ receivers: service: extensions: - agenthealth/metrics + - entitystore pipelines: metrics/host: exporters: diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index 1844d1fdf6..9408a86001 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -45,8 +45,6 @@ receivers: dialer: timeout: 0s endpoint: 127.0.0.1:2000 - dialer: - timeout: "0s" proxy_server: aws_endpoint: "" certificate_file_path: "" diff --git 
a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index 7e5c2056d8..84cc456c97 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -45,11 +45,7 @@ receivers: dialer: timeout: 0s endpoint: 127.0.0.1:2000 - dialer: - timeout: "0s" proxy_server: - dialer: - timeout: "0s" aws_endpoint: "" certificate_file_path: "" dialer: From b4d1a93ef83458c61393cf2ccaf69171a3bc8090 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 20 Aug 2024 16:43:08 -0400 Subject: [PATCH 50/55] Fix CloudWatch Log SDK unit tests by referencing correct dependency (#784) --- sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go b/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go index d285cbadf3..92defc703d 100644 --- a/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go +++ b/sdk/service/cloudwatchlogs/cloudwatchlogsiface/interface.go @@ -11,7 +11,8 @@ package cloudwatchlogsiface import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" ) // CloudWatchLogsAPI provides an interface to enable mocking the From 75c0223410925349300ae5bf9c7ffe7de69cb1bd Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 3 Sep 2024 11:16:46 -0400 Subject: [PATCH 51/55] removes unrelated components --- .github/workflows/alpha-release.yml | 25 -- .github/workflows/apm-beta-pre-release.yml | 17 - .github/workflows/apm-beta-release.yml | 15 - ...lication-signals-java-e2e-ec2-asg-test.yml | 168 ---------- ...cation-signals-python-e2e-ec2-asg-test.yml | 167 --------- ...pplication-signals-python-e2e-ec2-test.yml | 160 
--------- ...pplication-signals-python-e2e-eks-test.yml | 317 ------------------ .github/workflows/compass-beta-release.yml | 18 - .../workflows/configurable-nonprod-test.yml | 33 -- ...hanced-container-insights-beta-release.yml | 28 -- .../enhanced-container-insights-internal.yml | 17 - .github/workflows/repo-sync.yml | 58 ---- 12 files changed, 1023 deletions(-) delete mode 100644 .github/workflows/alpha-release.yml delete mode 100644 .github/workflows/apm-beta-pre-release.yml delete mode 100644 .github/workflows/apm-beta-release.yml delete mode 100644 .github/workflows/application-signals-java-e2e-ec2-asg-test.yml delete mode 100644 .github/workflows/application-signals-python-e2e-ec2-asg-test.yml delete mode 100644 .github/workflows/application-signals-python-e2e-ec2-test.yml delete mode 100644 .github/workflows/application-signals-python-e2e-eks-test.yml delete mode 100644 .github/workflows/compass-beta-release.yml delete mode 100644 .github/workflows/configurable-nonprod-test.yml delete mode 100644 .github/workflows/enhanced-container-insights-beta-release.yml delete mode 100644 .github/workflows/enhanced-container-insights-internal.yml delete mode 100644 .github/workflows/repo-sync.yml diff --git a/.github/workflows/alpha-release.yml b/.github/workflows/alpha-release.yml deleted file mode 100644 index c5d3c29fd6..0000000000 --- a/.github/workflows/alpha-release.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: CCWA Release - -on: - release: - types: [published] - -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "ccwa-release:latest" - BucketKey: "release" - PackageBucketKey: "release" - - DeployCanary: - needs: [BuildAndUpload] - uses: ./.github/workflows/deploy-canary.yml - secrets: inherit diff --git a/.github/workflows/apm-beta-pre-release.yml b/.github/workflows/apm-beta-pre-release.yml deleted file mode 100644 index 12e201f29c..0000000000 --- a/.github/workflows/apm-beta-pre-release.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: APM Beta Pre-Release -on: - workflow_dispatch: -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "apm-beta-pre-release:latest" - BucketKey: "apm-beta-pre-release" - PackageBucketKey: "apm-beta-pre-release" diff --git a/.github/workflows/apm-beta-release.yml b/.github/workflows/apm-beta-release.yml deleted file mode 100644 index 5268d64c6b..0000000000 --- a/.github/workflows/apm-beta-release.yml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: APM Beta Release - -on: - workflow_dispatch: - -jobs: - no-op: - runs-on: ubuntu-latest - steps: - - name: do nothing - run: | - echo "This is a no-op" \ No newline at end of file diff --git a/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml b/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml deleted file mode 100644 index af25972507..0000000000 --- a/.github/workflows/application-signals-java-e2e-ec2-asg-test.yml +++ /dev/null @@ -1,168 +0,0 @@ -## Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. -## SPDX-License-Identifier: Apache-2.0 - -# This is a reusable workflow for running the E2E test for App Signals. -# It is meant to be called from another workflow. -# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview -name: App Signals Enablement E2E Testing - EC2 ASG Use Case -on: - workflow_call: - -permissions: - id-token: write - contents: read - -env: - # The presence of this env var is required for use by terraform and AWS CLI commands - # It is not redundant - AWS_DEFAULT_REGION: us-east-1 - APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - SAMPLE_APP_FRONTEND_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/main-service.jar" - SAMPLE_APP_REMOTE_SERVICE_JAR: "s3://aws-appsignals-sample-app-prod-us-east-1/remote-service.jar" - GET_ADOT_JAR_COMMAND: "aws s3 cp s3://adot-main-build-staging-jar/aws-opentelemetry-agent.jar ./adot.jar" - GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" - METRIC_NAMESPACE: ApplicationSignals - LOG_GROUP_NAME: /aws/application-signals/data - -jobs: - e2e-ec2-single-asg-test: - runs-on: ubuntu-latest - steps: - - name: Get testing resources from aws-application-signals-test-framework - uses: actions/checkout@v4 - with: - repository: aws-observability/aws-application-signals-test-framework - ref: add-ec2-platform-support - - - name: Generate testing id - run: echo TESTING_ID="java-asg-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - - name: Set up terraform - uses: 
hashicorp/setup-terraform@v3 - with: - terraform_wrapper: false - - - name: Deploy sample app via terraform - working-directory: terraform/ec2/asg - run: | - terraform init - terraform validate - terraform apply -auto-approve \ - -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ - -var="test_id=${{ env.TESTING_ID }}" \ - -var="sample_app_jar=${{ env.SAMPLE_APP_FRONTEND_SERVICE_JAR }}" \ - -var="sample_remote_app_jar=${{ env.SAMPLE_APP_REMOTE_SERVICE_JAR }}" \ - -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ - -var="get_adot_jar_command=${{ env.GET_ADOT_JAR_COMMAND }}" - - - name: Get sample app and EC2 instance information - working-directory: terraform/ec2/asg - run: | - main_service_instance_id=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names ec2-single-asg-${{ env.TESTING_ID }} --region ${{ env.AWS_DEFAULT_REGION }} --query "AutoScalingGroups[].Instances[0].InstanceId" --output text) - main_service_public_ip=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PublicIpAddress" --output text) - main_service_private_dns_name=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PrivateDnsName" --output text) - echo "INSTANCE_ID=$main_service_instance_id" >> $GITHUB_ENV - echo "MAIN_SERVICE_ENDPOINT=$main_service_public_ip:8080" >> $GITHUB_ENV - echo "PRIVATE_DNS_NAME=$main_service_private_dns_name" >> $GITHUB_ENV - echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV - echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV - - - name: Wait for app endpoint to come online - id: endpoint-check - run: | - attempt_counter=0 - max_attempts=30 - until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do - if [ ${attempt_counter} -eq 
${max_attempts} ];then - echo "Max attempts reached" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 10 - done - - # This steps increases the speed of the validation by creating the telemetry data in advance - - name: Call all test APIs - continue-on-error: true - run: | - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call" - - # Validation for pulse telemetry data - - name: Validate generated EMF logs - id: log-validation - run: ./gradlew validator:run --args='-c java/ec2/asg/log-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name sample-application-${{ env.TESTING_ID }} - --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - - name: Validate generated metrics - id: metric-validation - if: (success() || steps.log-validation-1.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c java/ec2/asg/metric-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ 
env.REMOTE_SERVICE_IP }}:8080 - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name sample-application-${{ env.TESTING_ID }} - --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - - name: Validate generated traces - id: trace-validation - if: (success() || steps.log-validation-1.outcome == 'failure' || steps.metric-validation-1.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c java/ec2/asg/trace-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8080 - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name sample-application-${{ env.TESTING_ID }} - --remote-service-name sample-remote-application-${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - # Clean up Procedures - - name: Terraform destroy - if: always() - continue-on-error: true - working-directory: terraform/ec2/asg - run: | - terraform destroy -auto-approve \ - -var="test_id=${{ env.TESTING_ID }}" diff --git a/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml 
b/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml deleted file mode 100644 index 553e792e10..0000000000 --- a/.github/workflows/application-signals-python-e2e-ec2-asg-test.yml +++ /dev/null @@ -1,167 +0,0 @@ -## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -## SPDX-License-Identifier: Apache-2.0 - -# This is a reusable workflow for running the Python E2E Canary test for Application Signals. -# It is meant to be called from another workflow. -# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview -name: Application Signals Enablement E2E Testing - Python EC2 Asg Use Case -on: - workflow_call: - -permissions: - id-token: write - contents: read - -env: - AWS_DEFAULT_REGION: us-east-1 - APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/python-sample-app.zip - METRIC_NAMESPACE: ApplicationSignals - LOG_GROUP_NAME: /aws/application-signals/data - ADOT_WHEEL_NAME: ${{ inputs.staging_wheel_name }} - TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE} - GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" - GET_ADOT_WHEEL_COMMAND: "aws s3 cp s3://metric-schema-changes/aws_opentelemetry_distro-0.2.0-py3-none-any.whl ./aws_opentelemetry_distro-0.2.0-py3-none-any.whl && python3.9 -m pip install aws_opentelemetry_distro-0.2.0-py3-none-any.whl" - -jobs: - python-e2e-ec2-asg-test: - runs-on: ubuntu-latest - steps: - - name: Get testing resources from aws-application-signals-test-framework - uses: actions/checkout@v4 - with: - repository: aws-observability/aws-application-signals-test-framework - ref: add-ec2-platform-for-python-ga - - - name: Generate testing id - run: echo TESTING_ID="python-asg-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV - - 
- name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - - name: Set up terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_wrapper: false - - - name: Deploy sample app via terraform - working-directory: terraform/python/ec2/asg - run: | - terraform init - terraform validate - terraform apply -auto-approve \ - -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ - -var="test_id=${{ env.TESTING_ID }}" \ - -var="sample_app_zip=${{ env.SAMPLE_APP_ZIP }}" \ - -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ - -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}" - - - name: Get sample app and EC2 instance information - working-directory: terraform/python/ec2/asg - run: | - main_service_instance_id=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names python-ec2-single-asg-${{ env.TESTING_ID }} --region ${{ env.AWS_DEFAULT_REGION }} --query "AutoScalingGroups[].Instances[0].InstanceId" --output text) - main_service_public_ip=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PublicIpAddress" --output text) - main_service_private_dns_name=$(aws ec2 describe-instances --instance-ids $main_service_instance_id --region ${{ env.AWS_DEFAULT_REGION }} --query "Reservations[].Instances[].PrivateDnsName" --output text) - echo "INSTANCE_ID=$main_service_instance_id" >> $GITHUB_ENV - echo "MAIN_SERVICE_ENDPOINT=$main_service_public_ip:8000" >> $GITHUB_ENV - echo "PRIVATE_DNS_NAME=$main_service_private_dns_name" >> $GITHUB_ENV - echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV - echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV - - - name: Wait for app 
endpoint to come online - id: endpoint-check - run: | - attempt_counter=0 - max_attempts=30 - until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "Max attempts reached" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 10 - done - - # This steps increases the speed of the validation by creating the telemetry data in advance - - name: Call all test APIs - continue-on-error: true - run: | - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}" - curl -S -s "http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call" - - - name: Build Gradle - run: ./gradlew - - # Validation for pulse telemetry data - - name: Validate generated EMF logs - id: log-validation - run: ./gradlew validator:run --args='-c python/ec2/asg/log-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --rollup' - - - name: Validate generated metrics - id: metric-validation - if: (success() || steps.log-validation.outcome == 'failure') 
&& !cancelled() - run: ./gradlew validator:run --args='-c python/ec2/asg/metric-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --rollup' - - - name: Validate generated traces - id: trace-validation - if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c python/ec2/asg/trace-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --platform-info python-ec2-single-asg-${{ env.TESTING_ID }} - --instance-id ${{ env.INSTANCE_ID }} - --private-dns-name ${{ env.PRIVATE_DNS_NAME }} - --rollup' - - # Clean up Procedures - - name: Terraform destroy - if: always() - continue-on-error: true - working-directory: 
terraform/python/ec2/asg - run: | - terraform destroy -auto-approve \ - -var="test_id=${{ env.TESTING_ID }}" \ No newline at end of file diff --git a/.github/workflows/application-signals-python-e2e-ec2-test.yml b/.github/workflows/application-signals-python-e2e-ec2-test.yml deleted file mode 100644 index 470bb492ac..0000000000 --- a/.github/workflows/application-signals-python-e2e-ec2-test.yml +++ /dev/null @@ -1,160 +0,0 @@ -## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -## SPDX-License-Identifier: Apache-2.0 - -# This is a reusable workflow for running the Python E2E Canary test for Application Signals. -# It is meant to be called from another workflow. -# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview -name: Application Signals Enablement E2E Testing - Python EC2 Use Case -on: - workflow_call: - -permissions: - id-token: write - contents: read - -env: - AWS_DEFAULT_REGION: us-east-1 - APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/python-sample-app.zip - METRIC_NAMESPACE: ApplicationSignals - LOG_GROUP_NAME: /aws/application-signals/data - TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE} - GET_CW_AGENT_RPM_COMMAND: "aws s3 cp s3://${{ secrets.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ github.sha }}/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm ./cw-agent.rpm" - GET_ADOT_WHEEL_COMMAND: "aws s3 cp s3://metric-schema-changes/aws_opentelemetry_distro-0.2.0-py3-none-any.whl ./aws_opentelemetry_distro-0.2.0-py3-none-any.whl && python3.9 -m pip install aws_opentelemetry_distro-0.2.0-py3-none-any.whl" - -jobs: - python-e2e-ec2-test: - runs-on: ubuntu-latest - steps: - - name: Get testing resources from aws-application-signals-test-framework - uses: actions/checkout@v4 - with: - repository: aws-observability/aws-application-signals-test-framework - ref: ga-python - - - 
name: Generate testing id - run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - - name: Set up terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_wrapper: false - - - name: Deploy sample app via terraform - working-directory: terraform/python/ec2 - run: | - terraform init - terraform validate - terraform apply -auto-approve \ - -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ - -var="test_id=${{ env.TESTING_ID }}" \ - -var="sample_app_zip=${{ env.SAMPLE_APP_ZIP }}" \ - -var="get_cw_agent_rpm_command=${{ env.GET_CW_AGENT_RPM_COMMAND }}" \ - -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}" - - - name: Get the ec2 instance ami id - working-directory: terraform/python/ec2 - run: | - echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV - - - name: Get the sample app endpoint - working-directory: terraform/python/ec2 - run: | - echo "MAIN_SERVICE_ENDPOINT=$(terraform output sample_app_main_service_public_dns):8000" >> $GITHUB_ENV - echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_public_ip)" >> $GITHUB_ENV - echo "MAIN_SERVICE_INSTANCE_ID=$(terraform output main_service_instance_id)" >> $GITHUB_ENV - - - name: Wait for app endpoint to come online - id: endpoint-check - run: | - attempt_counter=0 - max_attempts=30 - until $(curl --output /dev/null --silent --head --fail http://${{ env.MAIN_SERVICE_ENDPOINT }}); do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "Max attempts reached" - exit 1 - fi - - printf '.' 
- attempt_counter=$(($attempt_counter+1)) - sleep 10 - done - - # This steps increases the speed of the validation by creating the telemetry data in advance - - name: Call all test APIs - continue-on-error: true - run: | - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/outgoing-http-call/; echo - curl -S -s -o /dev/null "http://${{ env.MAIN_SERVICE_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}"; echo - curl -S -s -o /dev/null "http://${{ env.MAIN_SERVICE_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}"; echo - curl -S -s -o /dev/null http://${{ env.MAIN_SERVICE_ENDPOINT }}/client-call/; echo - - - name: Build Gradle - run: ./gradlew - - # Validation for pulse telemetry data - - name: Validate generated EMF logs - id: log-validation - run: ./gradlew validator:run --args='-c python/ec2/log-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} - --rollup' - - - name: Validate generated metrics - id: metric-validation - if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c python/ec2/metric-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --metric-namespace ${{ 
env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} - --rollup' - - - name: Validate generated traces - id: trace-validation - if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c python/ec2/trace-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.MAIN_SERVICE_ENDPOINT }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001 - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --service-name python-sample-application-${{ env.TESTING_ID }} - --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }} - --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }} - --instance-ami ${{ env.EC2_INSTANCE_AMI }} - --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }} - --rollup' - - # Clean up Procedures - - name: Terraform destroy - if: always() - continue-on-error: true - working-directory: terraform/ec2 - run: | - terraform destroy -auto-approve \ - -var="test_id=${{ env.TESTING_ID }}" \ No newline at end of file diff --git a/.github/workflows/application-signals-python-e2e-eks-test.yml b/.github/workflows/application-signals-python-e2e-eks-test.yml deleted file mode 100644 index f33e12a553..0000000000 --- a/.github/workflows/application-signals-python-e2e-eks-test.yml +++ /dev/null @@ -1,317 +0,0 @@ -## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-## SPDX-License-Identifier: Apache-2.0 - -# This is a reusable workflow for running the E2E test for Application Signals. -# It is meant to be called from another workflow. -# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview -name: Application Signals Enablement E2E Testing - Python EKS -on: - workflow_call: - inputs: - test-cluster-name: - required: true - type: string - -permissions: - id-token: write - contents: read - -env: - AWS_DEFAULT_REGION: us-east-1 - APP_SIGNALS_E2E_TEST_ACCOUNT_ID: ${{ secrets.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - SAMPLE_APP_NAMESPACE: sample-app-namespace - SAMPLE_APP_FRONTEND_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_FRONTEND_SVC_IMG }} - SAMPLE_APP_REMOTE_SERVICE_IMAGE: ${{ secrets.APP_SIGNALS_E2E_SAMPLE_APP_REMOTE_SVC_IMG }} - METRIC_NAMESPACE: ApplicationSignals - LOG_GROUP_NAME: /aws/application-signals/data - ECR_INTEGRATION_TEST_REPO: "cwagent-integration-test" - APPLICATION_SIGNALS_ADOT_IMAGE: 637423224110.dkr.ecr.us-east-1.amazonaws.com/aws-observability/adot-autoinstrumentation-python-staging:0.2.0-408d938 - -jobs: - python-e2e-eks-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - repository: aws-observability/aws-application-signals-test-framework - ref: ga-python - - - name: Download enablement script - uses: actions/checkout@v4 - with: - repository: aws-observability/application-signals-demo - ref: main - path: enablement-script - sparse-checkout: | - scripts/eks/appsignals/enable-app-signals.sh - scripts/eks/appsignals/clean-app-signals.sh - sparse-checkout-cone-mode: false - - - name: Generate testing id - run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: arn:aws:iam::${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ 
secrets.APP_SIGNALS_E2E_TEST_ROLE_NAME }} - aws-region: ${{ env.AWS_DEFAULT_REGION }} - - # local directory to store the kubernetes config - - name: Create kubeconfig directory - run: mkdir -p ${{ github.workspace }}/.kube - - - name: Set KUBECONFIG environment variable - run: echo KUBECONFIG="${{ github.workspace }}/.kube/config" >> $GITHUB_ENV - - - name: Set up kubeconfig - run: aws eks update-kubeconfig --name ${{ inputs.test-cluster-name }} --region ${{ env.AWS_DEFAULT_REGION }} - - - name: Install eksctl - run: | - mkdir ${{ github.workspace }}/eksctl - curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz" - tar -xzf eksctl_Linux_amd64.tar.gz -C ${{ github.workspace }}/eksctl && rm eksctl_Linux_amd64.tar.gz - echo "${{ github.workspace }}/eksctl" >> $GITHUB_PATH - - - name: Create role for AWS access from the sample app - id: create_service_account - run: | - eksctl create iamserviceaccount \ - --name service-account-${{ env.TESTING_ID }} \ - --namespace ${{ env.SAMPLE_APP_NAMESPACE }} \ - --cluster ${{ inputs.test-cluster-name }} \ - --role-name eks-s3-access-${{ env.TESTING_ID }} \ - --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \ - --region ${{ env.AWS_DEFAULT_REGION }} \ - --approve - - - name: Set up terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_wrapper: false - - - name: Deploy sample app via terraform - working-directory: terraform/python/eks - run: | - terraform init - terraform validate - terraform apply -auto-approve \ - -var="test_id=${{ env.TESTING_ID }}" \ - -var="aws_region=${{ env.AWS_DEFAULT_REGION }}" \ - -var="kube_directory_path=${{ github.workspace }}/.kube" \ - -var="eks_cluster_name=${{ inputs.test-cluster-name }}" \ - -var="eks_cluster_context_name=$(kubectl config current-context)" \ - -var="test_namespace=${{ env.SAMPLE_APP_NAMESPACE }}" \ - -var="service_account_aws_access=service-account-${{ env.TESTING_ID }}" \ - 
-var="python_app_image=654654176582.dkr.ecr.us-east-1.amazonaws.com/appsignals-python-django-main-service" \ - -var="python_remote_app_image=654654176582.dkr.ecr.us-east-1.amazonaws.com/appsignals-python-django-remote-service" - - # Enable App Signals on the test cluster - - name: Enable App Signals - working-directory: enablement-script/scripts/eks/appsignals - run: | - ./enable-app-signals.sh \ - ${{ inputs.test-cluster-name }} \ - ${{ env.AWS_DEFAULT_REGION }} \ - ${{ env.SAMPLE_APP_NAMESPACE }} - - - name: Save CloudWatch image to environment before patching - run: | - echo "OLD_CW_AGENT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ - jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV - - - name: Patch the CloudWatch Agent image and restart CloudWatch pods - run: | - kubectl patch amazoncloudwatchagents -n amazon-cloudwatch cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": ${{ secrets.AWS_ECR_PRIVATE_REGISTRY }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ github.sha }}}]' - kubectl delete pods --all -n amazon-cloudwatch - sleep 10 - kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch - - - name: Patch the ADOT image and restart CloudWatch pods - run: | - kubectl patch deploy -namazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' \ - -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/2", "value": "--auto-instrumentation-python-image=${{ env.APPLICATION_SIGNALS_ADOT_IMAGE }}"}]' - kubectl delete pods --all -n amazon-cloudwatch - sleep 10 - kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch - - # Application pods need to be restarted for the - # app signals instrumentation to take effect - - name: Restart the app pods - run: kubectl delete pods --all -n ${{ env.SAMPLE_APP_NAMESPACE }} - - - name: Wait for sample app pods to come up - run: | - kubectl wait --for=condition=Ready pod --all 
-n ${{ env.SAMPLE_APP_NAMESPACE }} \ - - - name: Get remote service deployment name and IP - run: | - echo "REMOTE_SERVICE_DEPLOYMENT_NAME=$(kubectl get deployments -n ${{ env.SAMPLE_APP_NAMESPACE }} --selector=app=remote-app -o jsonpath='{.items[0].metadata.name}')" >> $GITHUB_ENV - echo "REMOTE_SERVICE_POD_IP=$(kubectl get pods -n ${{ env.SAMPLE_APP_NAMESPACE }} --selector=app=remote-app -o jsonpath='{.items[0].status.podIP}')" >> $GITHUB_ENV - - - name: Log pod ADOT image ID - run: | - kubectl get pods -n ${{ env.SAMPLE_APP_NAMESPACE }} --output json | \ - jq '.items[0].status.initContainerStatuses[0].imageID' - - - name: Log pod CWAgent Operator image ID - run: | - kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=amazon-cloudwatch-observability -o json | \ - jq '.items[0].status.containerStatuses[0].imageID' - - - name: Log pod FluentBit image ID - run: | - kubectl get pods -n amazon-cloudwatch -l k8s-app=fluent-bit -o json | \ - jq '.items[0].status.containerStatuses[0].imageID' - - - name: Log pod CWAgent image ID and save image to the environment - run: | - kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ - jq '.items[0].status.containerStatuses[0].imageID' - - echo "NEW_CW_AGENT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=cloudwatch-agent -o json | \ - jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV - -# - name: Check if CW Agent image has changed -# run: | -# if [ ${{ env.OLD_CW_AGENT_IMAGE }} = ${{ env.NEW_CW_AGENT_IMAGE }} ]; then -# echo "Operator image did not change" -# exit 1 -# fi - - - name: Get the sample app endpoint - run: | - echo "APP_ENDPOINT=$(terraform output python_app_endpoint)" >> $GITHUB_ENV - working-directory: terraform/python/eks - - - name: Wait for app endpoint to come online - id: endpoint-check - run: | - attempt_counter=0 - max_attempts=30 - until $(curl --output /dev/null --silent --head --fail http://${{ env.APP_ENDPOINT 
}}); do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "Max attempts reached" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 10 - done - - # This steps increases the speed of the validation by creating the telemetry data in advance - - name: Call all test APIs - continue-on-error: true - run: | - curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/outgoing-http-call"; echo - curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/aws-sdk-call?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}"; echo - curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }}"; echo - curl -S -s -o /dev/null "http://${{ env.APP_ENDPOINT }}/client-call"; echo - - - name: Build Gradle - run: ./gradlew - - # Validation for application signals telemetry data - - name: Call endpoint and validate generated EMF logs - id: log-validation - if: steps.endpoint-check.outcome == 'success' && !cancelled() - run: ./gradlew validator:run --args='-c python/eks/log-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.APP_ENDPOINT }} - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} - --platform-info ${{ inputs.test-cluster-name }} - --service-name python-application-${{ env.TESTING_ID }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - - name: Call endpoints and validate generated metrics - id: metric-validation - if: (success() || steps.log-validation.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c python/eks/metric-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint 
http://${{ env.APP_ENDPOINT }} - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --metric-namespace ${{ env.METRIC_NAMESPACE }} - --log-group ${{ env.LOG_GROUP_NAME }} - --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} - --platform-info ${{ inputs.test-cluster-name }} - --service-name python-application-${{ env.TESTING_ID }} - --remote-service-name python-remote-application-${{ env.TESTING_ID }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - - name: Call endpoints and validate generated traces - id: trace-validation - if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled() - run: ./gradlew validator:run --args='-c python/eks/trace-validation.yml - --testing-id ${{ env.TESTING_ID }} - --endpoint http://${{ env.APP_ENDPOINT }} - --region ${{ env.AWS_DEFAULT_REGION }} - --account-id ${{ env.APP_SIGNALS_E2E_TEST_ACCOUNT_ID }} - --log-group ${{ env.LOG_GROUP_NAME }} - --app-namespace ${{ env.SAMPLE_APP_NAMESPACE }} - --platform-info ${{ inputs.test-cluster-name }} - --service-name python-application-${{ env.TESTING_ID }} - --remote-service-deployment-name ${{ env.REMOTE_SERVICE_DEPLOYMENT_NAME }} - --query-string ip=${{ env.REMOTE_SERVICE_POD_IP }}&testingId=${{ env.TESTING_ID }} - --rollup' - - # Clean up Procedures - - - name: Remove log group deletion command - if: always() - working-directory: enablement-script/scripts/eks/appsignals - run: | - delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP_NAME }}' --region \$REGION" - sed -i "s#$delete_log_group##g" clean-app-signals.sh - - - name: Clean Up Application Signals - if: always() - continue-on-error: true - working-directory: enablement-script - run: | - ./clean-app-signals.sh \ - ${{ inputs.test-cluster-name }} \ - ${{ inputs.aws-region }} 
\ - ${{ env.SAMPLE_APP_NAMESPACE }} - - # This step also deletes lingering resources from previous test runs - - name: Delete all sample app resources - if: always() - continue-on-error: true - timeout-minutes: 10 - run: kubectl delete namespace ${{ env.SAMPLE_APP_NAMESPACE }} - - - name: Terraform destroy - if: always() - continue-on-error: true - timeout-minutes: 5 - working-directory: terraform/python/eks - run: | - terraform destroy -auto-approve \ - -var="test_id=${{ env.TESTING_ID }}" \ - -var="aws_region=${{ inputs.aws-region }}" \ - -var="kube_directory_path=${{ github.workspace }}/.kube" \ - -var="eks_cluster_name=${{ inputs.test-cluster-name }}" \ - -var="test_namespace=${{ env.SAMPLE_APP_NAMESPACE }}" \ - -var="service_account_aws_access=service-account-${{ env.TESTING_ID }}" \ - -var="python_app_image=${{ env.ACCOUNT_ID }}.dkr.ecr.${{ inputs.aws-region }}.amazonaws.com/${{ secrets.APP_SIGNALS_PYTHON_E2E_FE_SA_IMG }}" \ - -var="python_remote_app_image=${{ env.ACCOUNT_ID }}.dkr.ecr.${{ inputs.aws-region }}.amazonaws.com/${{ secrets.APP_SIGNALS_PYTHON_E2E_RE_SA_IMG }}" - - - name: Remove aws access service account - if: always() - continue-on-error: true - run: | - eksctl delete iamserviceaccount \ - --name service-account-${{ env.TESTING_ID }} \ - --namespace ${{ env.SAMPLE_APP_NAMESPACE }} \ - --cluster ${{ inputs.test-cluster-name }} \ - --region ${{ inputs.aws-region }} diff --git a/.github/workflows/compass-beta-release.yml b/.github/workflows/compass-beta-release.yml deleted file mode 100644 index 85599bd051..0000000000 --- a/.github/workflows/compass-beta-release.yml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: Compass Beta Release -on: - workflow_dispatch: -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - BucketKey: "compass-beta-release" - PackageBucketKey: "compass-beta-release" - TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - Bucket: "private-cloudwatch-agent-integration-test" diff --git a/.github/workflows/configurable-nonprod-test.yml b/.github/workflows/configurable-nonprod-test.yml deleted file mode 100644 index df7f6bb85b..0000000000 --- a/.github/workflows/configurable-nonprod-test.yml +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: NonProd Test Build Binaries Configurable -on: - workflow_dispatch: - inputs: - ContainerPostfixTag: - # e.g. "ccwa_nonprod:test-${input goes here}" - description: "ECR repo name and tag" - required: true - type: string - BucketPostfixPath: - # e.g. s3:///nonprod/test/${input goes here} - description: "S3 URI to upload artifacts into." - required: true - type: string - PackageBucketPostfixPath: - # e.g. s3:///nonprod/test/${input goes here} - description: "Integration tests put the MSI and PKG in a different bucket path than the binaries." 
- required: true - type: string -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "ccwa_nonprod:test-${{ inputs.ContainerPostfixTag }}" - BucketKey: "nonprod/test/${{ inputs.BucketPostfixPath }}" - PackageBucketKey: "nonprod/test/${{ github.PackageBucketPostfixPath }}" \ No newline at end of file diff --git a/.github/workflows/enhanced-container-insights-beta-release.yml b/.github/workflows/enhanced-container-insights-beta-release.yml deleted file mode 100644 index f08844262f..0000000000 --- a/.github/workflows/enhanced-container-insights-beta-release.yml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: Enhanced Container Insights Beta Release -on: - workflow_dispatch: -jobs: - BuildAndPushToUSWEST2: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "enhanced-container-insights-beta:latest" - BucketKey: "enhanced-container-insights-beta" - PackageBucketKey: "enhanced-container-insights-beta" - BuildAndPushToUSEAST1: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "enhanced-container-insights-beta:latest" - BucketKey: "enhanced-container-insights-beta" - PackageBucketKey: "enhanced-container-insights-beta" - TargetRegion: "us-east-1" diff --git a/.github/workflows/enhanced-container-insights-internal.yml b/.github/workflows/enhanced-container-insights-internal.yml deleted file mode 100644 index 4ac6250bf8..0000000000 --- a/.github/workflows/enhanced-container-insights-internal.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: Enhanced Container Insights Internal Test Release -on: - workflow_dispatch: -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "enhanced-container-insights-internal:latest" - BucketKey: "enhanced-container-insights-internal" - PackageBucketKey: "enhanced-container-insights-internal" diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml deleted file mode 100644 index 1bef136e90..0000000000 --- a/.github/workflows/repo-sync.yml +++ /dev/null @@ -1,58 +0,0 @@ -# disable this workflow after beta phase -name: Sync with upstream - -on: - schedule: - - cron: "0 0 * * 0" # every sunday at 12AM - workflow_dispatch: - -env: - RUN_ID: "${{ github.run_number }}.${{ github.run_attempt }}" - UPSTREAM: "https://github.com/aws/amazon-cloudwatch-agent.git" - -jobs: - # gets the last commit hash from public/master and defines the branch name - # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idoutputs - define-branch-name: - runs-on: ubuntu-latest - steps: - - name: Get last commit hash from public - id: get-last-commit - run: echo "hash=$(git ls-remote ${{ env.UPSTREAM }} HEAD | awk '{print $1;}')" >> $GITHUB_OUTPUT - outputs: - LAST_COMMIT: ${{ steps.get-last-commit.outputs.hash }} - PR_BRANCH: "repo-sync-${{ steps.get-last-commit.outputs.hash }}-${{ env.RUN_ID }}" - - # pushes the latest from public/main to private/repo-sync - # https://github.com/marketplace/actions/github-repo-sync - create-branch: - needs: define-branch-name - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - persist-credentials: false - - name: repo-sync - uses: repo-sync/github-sync@v2 - with: - source_repo: ${{ env.UPSTREAM }} - source_branch: "main" - destination_branch: ${{ needs.define-branch-name.outputs.PR_BRANCH }} - github_token: ${{ 
secrets.WILLIAZZ_PAT }} - - # upon create-branch completion, creates a PR from private/repo-sync to private/main - # https://github.com/marketplace/actions/github-pull-request-action - create-pr: - needs: [define-branch-name, create-branch] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: pull-request - uses: repo-sync/pull-request@v2 - with: - source_branch: ${{ needs.define-branch-name.outputs.PR_BRANCH }} - destination_branch: "main" - github_token: ${{ secrets.GITHUB_TOKEN }} - pr_title: "Automated sync with upstream - last commit ${{ needs.define-branch-name.outputs.LAST_COMMIT }} - run #${{ env.RUN_ID }}" - pr_template: ".github/repo_sync_pr_template.md" - pr_allow_empty: false From 2d8e9928ac4353c8e03dfa804ca91fb6669c2c8f Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 3 Sep 2024 11:22:05 -0400 Subject: [PATCH 52/55] removes region-lock for entity call --- plugins/outputs/cloudwatchlogs/pusher.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index d00c7a6a69..d3d86466a9 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -233,9 +233,7 @@ func (p *pusher) send() { LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, } - if p.region == "us-east-1" { - input.Entity = entity - } + input.Entity = entity startTime := time.Now() From fb52eb8fe619a8cd05aefe5ff790bcfc0d48d728 Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 3 Sep 2024 11:30:00 -0400 Subject: [PATCH 53/55] removes integ tests, reverts .github folder to its status in main --- .github/repo_sync_pr_template.md | 46 -- .github/workflows/PR-build.yml | 38 -- .github/workflows/build-test-artifacts.yml | 4 +- .github/workflows/integration-test.yml | 79 +-- .github/workflows/nonprod-release.yml | 17 - .github/workflows/test-build.yml | 10 - test/README.md | 11 - test/compass/compass_test.go | 527 
------------------ .../resources/compass_default_log.json | 22 - .../resources/compass_role_arn_check.json | 24 - .../resources/compass_service_in_config.json | 23 - test/go.mod | 58 -- test/go.sum | 119 ---- 13 files changed, 10 insertions(+), 968 deletions(-) delete mode 100644 .github/repo_sync_pr_template.md delete mode 100644 .github/workflows/nonprod-release.yml delete mode 100644 test/README.md delete mode 100644 test/compass/compass_test.go delete mode 100644 test/compass/resources/compass_default_log.json delete mode 100644 test/compass/resources/compass_role_arn_check.json delete mode 100644 test/compass/resources/compass_service_in_config.json delete mode 100644 test/go.mod delete mode 100644 test/go.sum diff --git a/.github/repo_sync_pr_template.md b/.github/repo_sync_pr_template.md deleted file mode 100644 index 1bc6cea78a..0000000000 --- a/.github/repo_sync_pr_template.md +++ /dev/null @@ -1,46 +0,0 @@ -# Description of the issue -An automated PR to kickstart the process of syncing the latest changes from [cw-agent](https://github.com/aws/amazon-cloudwatch-agent/) - -# Description of changes - -### Follow the git CLI instructions resolve the merge conflicts - -```shell -git pull origin main -git checkout repo-sync-- -git merge main # do a regular merge -- we want to keep the commits -# resolve merge conflicts in your preferred IDE -git push -u origin repo-sync-- -``` - -Some useful commands -* [Restore conflict resolution in a single file](https://stackoverflow.com/questions/14409420/restart-undo-conflict-resolution-in-a-single-file) - `git checkout -m ` -* Total reset - `git merge --abort` - -### Related docs -* Resolving conflicts with: - * [Git CLI](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/addressing-merge-conflicts/resolving-a-merge-conflict-using-the-command-line) - * [IntelliJ](https://www.jetbrains.com/help/idea/resolving-conflicts.html#distributed-version-control-systems) - * 
[GoLand](https://www.jetbrains.com/help/go/resolve-conflicts.html) - * [VSCode](https://learn.microsoft.com/en-us/visualstudio/version-control/git-resolve-conflicts?view=vs-2022) - -### Best practices - -* Remember to update all references from `amazon-cloudwatch-agent` to `private-amazon-cloudwatch-agent-staging` -* Resolve the `go.sum` with `go mod tidy`. Don't bother manually resolving conflicts in this file -* When finished, ensure builds work by using `make build` or `make release` -* When unsure or blocked, do a deep dive on the `git blame` for greater context. Maybe even look for the associated PR's and ask the original authors and PR approvers -* If another automated PR arrives before your work is merged, just close your current one and save the branch -* After your PR is approved, **do a regular merge to preserve the commits**. -* Remember to cleanup your commits because none of them will be squashed in a regular merge - -# License -By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. - -# Tests -n/a - -# Requirements -_Before commit the code, please do the following steps._ -1. Run `make fmt` and `make fmt-sh` -2. 
Run `make lint` diff --git a/.github/workflows/PR-build.yml b/.github/workflows/PR-build.yml index 243ef3bb3c..ec4b07249c 100644 --- a/.github/workflows/PR-build.yml +++ b/.github/workflows/PR-build.yml @@ -2,10 +2,6 @@ # SPDX-License-Identifier: MIT name: PR Build -env: - TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours - on: workflow_dispatch: pull_request: @@ -77,9 +73,6 @@ jobs: needs: [lint, changes] name: Build ${{ matrix.os }} runs-on: ${{ matrix.os }} - permissions: - id-token: write - contents: read strategy: fail-fast: false matrix: @@ -106,13 +99,6 @@ jobs: ~\AppData\Local\go-build ~\go\pkg\mod steps: - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-east-1 - - name: Set up Go 1.x if: needs.changes.outputs.build == 'true' uses: actions/setup-go@v4 @@ -143,30 +129,6 @@ jobs: if: matrix.family == 'windows' && steps.cached_binaries.outputs.cache-hit != 'true' && needs.changes.outputs.build == 'true' run: choco install make -# TODO: remove "Replace AWS SDK" once changes are available publicly - - name: Replace AWS SDK (Windows) - if: matrix.family == 'windows' - run: | - mkdir C:/Users/runneradmin/gosdk - aws s3 cp s3://compass-pre-release/staging.zip $env:SYSTEMROOT - tar -xf $env:SYSTEMROOT/staging.zip -C C:/Users/runneradmin/gosdk - $env:sdkPath=(Get-ChildItem "C:/Users/runneradmin/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go") - echo $env:sdkPath - cd D:\a\private-amazon-cloudwatch-agent-staging\private-amazon-cloudwatch-agent-staging - ls - go mod edit -replace github.com/aws/aws-sdk-go=$env:sdkPath - - - name: Replace AWS SDK (Linux) - if: matrix.family != 'windows' - run: | - mkdir ~/gosdk - aws s3 cp s3://compass-pre-release/staging.zip ~ - unzip -q -d ~/gosdk ~/staging.zip || true - sdkPath=$(echo 
~/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go) - echo $sdkPath - ls - go mod edit -replace github.com/aws/aws-sdk-go=$sdkPath - - name: Unit Test if: steps.cached_binaries.outputs.cache-hit != 'true' && needs.changes.outputs.build == 'true' run: make test diff --git a/.github/workflows/build-test-artifacts.yml b/.github/workflows/build-test-artifacts.yml index 97062ef1fb..ca12788fc7 100644 --- a/.github/workflows/build-test-artifacts.yml +++ b/.github/workflows/build-test-artifacts.yml @@ -86,7 +86,7 @@ jobs: Bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} StartIntegrationTests: - needs: [ BuildAndUpload ] + needs: [ BuildAndUploadPackages, BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] runs-on: ubuntu-latest steps: - run: gh workflow run integration-test.yml --ref ${{ github.ref_name }} --repo $GITHUB_REPOSITORY -f build_run_id=${{ github.run_id }} -f build_sha=${{ github.sha }} @@ -94,7 +94,7 @@ jobs: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} StartApplicationSignalsE2ETests: - needs: [ BuildAndUpload ] + needs: [ BuildAndUploadPackages, BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] # Workflow only runs against main if: ${{ contains(github.ref_name, 'main') }} runs-on: ubuntu-latest diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 0c308f7018..222e465bd4 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -46,6 +46,14 @@ jobs: echo "Build SHA does not match test SHA" exit 1 fi + - run: | + conclusion=$(gh run view ${{ inputs.build_run_id }} --repo $GITHUB_REPOSITORY --json conclusion -q '.conclusion') + if [[ $conclusion == "success" ]]; then + echo "Run succeeded" + else + echo "Run failed" + exit 1 + fi env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -1337,74 +1345,3 @@ jobs: cd terraform/eks/addon/gpu fi terraform destroy --auto-approve - CompassLinuxIntegrationTest: - needs: [ GenerateTestMatrix ] - 
name: 'CompassLinuxIntegrationTest' - runs-on: ubuntu-latest - permissions: - id-token: write - contents: read - steps: - - name: Checkout CWA Test - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-east-1 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Cache if success - id: compass-integration-test - uses: actions/cache@v3 - with: - path: go.mod - key: compass-integration-test-${{ github.sha }} - - # nick-fields/retry@v2 starts at base dir - - name: Terraform apply - if: steps.compass-integration-test.outputs.cache-hit != 'true' - uses: nick-fields/retry@v2 - env: - TF_VAR_test_name: compass - TF_VAR_ssh_key_name: ${{ env.KEY_NAME }} - TF_VAR_ssh_key_value: ${{ env.PRIVATE_KEY }} - TF_VAR_user: ec2-user - TF_VAR_ami: cloudwatch-agent-integration-test-al2* - TF_VAR_arc: amd64 - TF_VAR_ec2_instance_type: t3a.medium - TF_VAR_github_test_repo: ${{ env.CWA_GITHUB_TEST_REPO_URL }} - TF_VAR_github_test_repo_branch: ${{ env.CWA_GITHUB_TEST_REPO_BRANCH }} - TF_VAR_cwa_github_sha: ${{ github.sha }} - TF_VAR_s3_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - TF_VAR_binary_name: amazon-cloudwatch-agent.rpm - TF_VAR_install_agent: go run ./install/install_agent.go rpm - TF_VAR_pre_test_setup: | - git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/aws/private-amazon-cloudwatch-agent-staging.git compass - cd compass - git checkout ${{ github.sha }} - cd test - with: - max_attempts: 3 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/ec2/linux - terraform init - if terraform apply --auto-approve -var="test_dir=./compass" -var="region=us-east-1" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - #This is here just in 
case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/ec2/linux && terraform destroy --auto-approve diff --git a/.github/workflows/nonprod-release.yml b/.github/workflows/nonprod-release.yml deleted file mode 100644 index d8d4485c16..0000000000 --- a/.github/workflows/nonprod-release.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT - -name: NonProd Release -on: - workflow_dispatch: -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "ccwa_nonprod:latest" - BucketKey: "nonprod" - PackageBucketKey: "nonprod" \ No newline at end of file diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 9f5747f375..9a4f68fa4f 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -111,16 +111,6 @@ jobs: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.PASSPHRASE }} - - name: Replace AWS SDK (Linux) - run: | - mkdir ~/gosdk - aws s3 cp s3://compass-pre-release/staging.zip ~ - unzip -q -d ~/gosdk ~/staging.zip || true - sdkPath=$(echo ~/gosdk/apollo/env/AWSGoSDK-Release/var/tmp/release-automation/staging-*/sdk/src/github.com/aws/aws-sdk-go) - echo $sdkPath - ls - go mod edit -replace github.com/aws/aws-sdk-go=$sdkPath - - name: Build Binaries if: contains(inputs.BucketKey, 'test') == false || steps.cached_binaries.outputs.cache-hit == false run: make amazon-cloudwatch-agent-linux amazon-cloudwatch-agent-windows package-rpm package-deb package-win diff --git a/test/README.md b/test/README.md deleted file mode 100644 index 8676abc5db..0000000000 --- a/test/README.md +++ /dev/null @@ -1,11 +0,0 @@ -## Private Integration Tests -The 
`test` module is meant to serve as a place for integration tests that cannot be placed in the external `amazon-cloudwatch-agent-test` repo. -These follow the pattern established by the external test repo and import dependencies from it to reuse as much as possible. Therefore, there are -a few requirements that are needed before running the tests. - -### Base Requirements -- GoLang 1.22+ -- A built and installed version of the agent from this repo - -### Compass -The compass integration tests. Verifies that PutLogEvents calls are attached with entities by the agent. \ No newline at end of file diff --git a/test/compass/compass_test.go b/test/compass/compass_test.go deleted file mode 100644 index 711674c505..0000000000 --- a/test/compass/compass_test.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: MIT - -package compass - -import ( - "context" - "errors" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" - cwlTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" - "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - - "github.com/aws/amazon-cloudwatch-agent-test/environment" - "github.com/aws/amazon-cloudwatch-agent-test/util/awsservice" - "github.com/aws/amazon-cloudwatch-agent-test/util/common" -) - -const ( - configOutputPath = "/opt/aws/amazon-cloudwatch-agent/bin/config.json" - logLineId1 = "foo" - logLineId2 = "bar" - logFilePath = "/tmp/cwagent_log_test.log" - sleepForFlush = 60 * time.Second - retryWaitTime = 30 * time.Second - cwlPerfEndpoint = "https://logs-perf.us-east-1.amazonaws.com" - 
iadRegionalCode = "us-east-1" - - entityType = "@entity.KeyAttributes.Type" - entityName = "@entity.KeyAttributes.Name" - entityEnvironment = "@entity.KeyAttributes.Environment" - - entityPlatform = "@entity.Attributes.PlatformType" - entityInstanceId = "@entity.Attributes.EC2.InstanceId" - credsDir = "/tmp/.aws" - testAccountRoleArn = "arn:aws:iam::874389809020:role/CloudWatchAgentServerRole" -) - -var ( - logLineIds = []string{logLineId1, logLineId2} - rnf *cwlTypes.ResourceNotFoundException - cwlClient *cloudwatchlogs.Client - crossAccountLogClient *cloudwatchlogs.Client - ec2Client *ec2.Client -) - -type expectedEntity struct { - entityType string - name string - environment string - platformType string - instanceId string -} - -func init() { - environment.RegisterEnvironmentMetaDataFlags() - awsCfg, err := config.LoadDefaultConfig( - context.Background(), - config.WithRegion(iadRegionalCode), - ) - if err != nil { - // handle error - fmt.Println("There was an error trying to load default config: ", err) - return - } - - cwlClient = cloudwatchlogs.NewFromConfig(awsCfg, func(o *cloudwatchlogs.Options) { - o.BaseEndpoint = aws.String(cwlPerfEndpoint) - }) - ec2Client = ec2.NewFromConfig(awsCfg) - - // Initialize STS client for cross-account checks - stsClient := sts.NewFromConfig(awsCfg) - - // Assume the role in the target account - appCreds := stscreds.NewAssumeRoleProvider(stsClient, testAccountRoleArn) - - // Create a new configuration using the assumed role credentials - assumedCfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(iadRegionalCode), - config.WithCredentialsProvider( - appCreds, - ), - ) - if err != nil { - log.Fatalf("unable to load assumed role config, %v", err) - } - - // Create a CloudWatch Logs client with the assumed role credentials - crossAccountLogClient = cloudwatchlogs.NewFromConfig(assumedCfg, func(o *cloudwatchlogs.Options) { - o.BaseEndpoint = aws.String(cwlPerfEndpoint) - }) - -} - -// 
TestWriteLogsToCloudWatch writes N number of logs, and then validates that the -// log events are associated with entities from CloudWatch Logs -func TestWriteLogsToCloudWatch(t *testing.T) { - // this uses the {instance_id} placeholder in the agent configuration, - // so we need to determine the host's instance ID for validation - instanceId := awsservice.GetInstanceId() - log.Printf("Found instance id %s", instanceId) - - err := ResetProfile() - // Truncate the common-config so we don't use the profile credential - if err != nil { - log.Fatalf("Error truncating file: %s", err) - } - - defer DeleteLogGroupAndStream(cwlClient, instanceId, instanceId) - - testCases := map[string]struct { - agentConfigPath string - iterations int - useEC2Tag bool - expectedEntity expectedEntity - }{ - "Compass/IAMRole": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - iterations: 1000, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "cwa-e2e-iam-instance-profile", - environment: "ec2:default", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - "Compass/EC2Tags": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - iterations: 1000, - useEC2Tag: true, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "compass-service-test", - environment: "ec2:default", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - "Compass/ServiceInConfig": { - agentConfigPath: filepath.Join("resources", "compass_service_in_config.json"), - iterations: 1000, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "compass-service", - environment: "compass-environment", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - - if testCase.useEC2Tag { - input := &ec2.CreateTagsInput{ - Resources: []string{instanceId}, - Tags: []ec2Types.Tag{ - { - Key: aws.String("service"), - Value: 
aws.String("compass-service-test"), - }, - }, - } - _, err := ec2Client.CreateTags(context.TODO(), input) - assert.NoError(t, err) - } - id := uuid.New() - f, err := os.Create(logFilePath + "-" + id.String()) - if err != nil { - t.Fatalf("Error occurred creating log file for writing: %v", err) - } - common.DeleteFile(common.AgentLogFile) - common.TouchFile(common.AgentLogFile) - - common.CopyFile(testCase.agentConfigPath, configOutputPath) - - common.StartAgent(configOutputPath, true, false) - - // ensure that there is enough time from the "start" time and the first log line, - // so we don't miss it in the GetLogEvents call - writeLogLines(t, f, testCase.iterations) - time.Sleep(sleepForFlush) - common.StopAgent() - end := time.Now() - - // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, cwlClient, instanceId, instanceId, &end, testCase.expectedEntity, false) - - f.Close() - os.Remove(logFilePath + "-" + id.String()) - }) - } -} - -// TestCrossAccount writes N number of logs, and then validates that the -// log events being sent to the other account are not associated with entity -func TestCrossAccount(t *testing.T) { - // this uses the {instance_id} placeholder in the agent configuration, - // so we need to determine the host's instance ID for validation - instanceId := awsservice.GetInstanceId() - log.Printf("Found instance id %s", instanceId) - defer DeleteLogGroupAndStream(crossAccountLogClient, instanceId, instanceId) - - testCases := map[string]struct { - agentConfigPath string - iterations int - setupFunction func() error - entityFieldsShouldMiss bool - expectedEntity expectedEntity - }{ - "Compass/RoleArnCrossAccount": { - agentConfigPath: filepath.Join("resources", "compass_role_arn_check.json"), - entityFieldsShouldMiss: true, - setupFunction: SetupRoleArnCredential, - iterations: 1000, - }, - "Compass/ProfileCrossAccount": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - 
entityFieldsShouldMiss: true, - setupFunction: SetupProfileCredential, - iterations: 1000, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - err := testCase.setupFunction() - if err != nil { - t.Fatalf("Error setting up cross-account credential: %v", err) - } - id := uuid.New() - f, err := os.Create(logFilePath + "-" + id.String()) - if err != nil { - t.Fatalf("Error occurred creating log file for writing: %v", err) - } - common.DeleteFile(common.AgentLogFile) - common.TouchFile(common.AgentLogFile) - - common.CopyFile(testCase.agentConfigPath, configOutputPath) - - common.StartAgent(configOutputPath, true, false) - - // ensure that there is enough time from the "start" time and the first log line, - // so we don't miss it in the GetLogEvents call - writeLogLines(t, f, testCase.iterations) - time.Sleep(sleepForFlush) - common.StopAgent() - end := time.Now() - - // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, crossAccountLogClient, instanceId, instanceId, &end, testCase.expectedEntity, testCase.entityFieldsShouldMiss) - - f.Close() - os.Remove(logFilePath + "-" + id.String()) - }) - } -} - -func SetupRoleArnCredential() error { - err := ResetProfile() - // Truncate the common-config so we don't use the profile credential - if err != nil { - return fmt.Errorf("error truncating file: %s", err) - } - log.Println("common-config has been emptied successfully") - - jsonPath := filepath.Join("resources", "compass_role_arn_check.json") - // Read the JSON file - fileContent, err := os.ReadFile(jsonPath) - if err != nil { - return fmt.Errorf("error reading file: %s", err) - } - // Convert the file content to a string - jsonString := string(fileContent) - - // Replace the placeholder with the actual role ARN - updatedJsonString := strings.ReplaceAll(jsonString, "{integ-test-role-arn}", testAccountRoleArn) - - // Write the updated JSON string back to the file - err = os.WriteFile(jsonPath, 
[]byte(updatedJsonString), 0644) - if err != nil { - return fmt.Errorf("error writing file: %s", err) - } - - log.Println("Successfully updated the role ARN in the JSON file") - return nil -} - -func SetupProfileCredential() error { - err := common.RunCommands(profileSetupCommand(testAccountRoleArn)) - return err -} - -func ResetProfile() error { - err := common.RunCommands(profileResetCommand()) - // Truncate the common-config so we don't use the profile credential - return err -} - -func profileSetupCommand(roleArn string) []string { - return []string{ - "mkdir -p " + credsDir, - "printf '[default]\naws_access_key_id=%s\naws_secret_access_key=%s\naws_session_token=%s' $(aws sts assume-role --role-arn " + roleArn + " --role-session-name test --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]' --output text) | tee " + credsDir + "/credentials>/dev/null", - "printf '[credentials]\n shared_credential_profile = \"default\"\n shared_credential_file = \"" + credsDir + "/credentials\"' | sudo tee /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml>/dev/null", - } -} - -func profileResetCommand() []string { - return []string{ - "sudo truncate -s 0 /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml", - } -} - -func writeLogLines(t *testing.T, f *os.File, iterations int) { - log.Printf("Writing %d lines to %s", iterations*len(logLineIds), f.Name()) - - for i := 0; i < iterations; i++ { - ts := time.Now() - for _, id := range logLineIds { - _, err := f.WriteString(fmt.Sprintf("%s - [%s] #%d This is a log line.\n", ts.Format(time.StampMilli), id, i)) - if err != nil { - // don't need to fatal error here. if a log line doesn't get written, the count - // when validating the log stream should be incorrect and fail there. 
- t.Logf("Error occurred writing log line: %v", err) - } - } - time.Sleep(30 * time.Millisecond) - } -} - -// ValidateLogs queries a given LogGroup/LogStream combination given the start and end times, and executes an -// arbitrary validator function on the found logs. -func ValidateEntity(t *testing.T, logClient *cloudwatchlogs.Client, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity, entityFieldsShouldMiss bool) { - log.Printf("Checking log group/stream: %s/%s", logGroup, logStream) - - logGroupInfo, err := getLogGroup(logClient) - for _, lg := range logGroupInfo { - if *lg.LogGroupName == logGroup { - log.Println("Log group " + *lg.LogGroupName + " exists") - break - } - } - assert.NoError(t, err) - begin := end.Add(-sleepForFlush * 2) - log.Printf("Start time is " + begin.String() + " and end time is " + end.String()) - queryId, err := getLogQueryId(logClient, logGroup, &begin, end) - assert.NoError(t, err) - log.Printf("queryId is " + *queryId) - result, err := getQueryResult(logClient, queryId) - assert.NoError(t, err) - if !assert.NotZero(t, len(result)) { - return - } - requiredEntityFields := map[string]bool{ - entityType: false, - entityName: false, - entityEnvironment: false, - entityPlatform: false, - entityInstanceId: false, - } - for _, field := range result[0] { - switch aws.ToString(field.Field) { - case entityType: - requiredEntityFields[entityType] = true - assert.Equal(t, expectedEntity.entityType, aws.ToString(field.Value)) - case entityName: - requiredEntityFields[entityName] = true - assert.Equal(t, expectedEntity.name, aws.ToString(field.Value)) - case entityEnvironment: - requiredEntityFields[entityEnvironment] = true - assert.Equal(t, expectedEntity.environment, aws.ToString(field.Value)) - case entityPlatform: - requiredEntityFields[entityPlatform] = true - assert.Equal(t, expectedEntity.platformType, aws.ToString(field.Value)) - case entityInstanceId: - requiredEntityFields[entityInstanceId] = true - 
assert.Equal(t, expectedEntity.instanceId, aws.ToString(field.Value)) - - } - fmt.Printf("%s: %s\n", aws.ToString(field.Field), aws.ToString(field.Value)) - } - entityFieldFoundCount := 0 - for _, value := range requiredEntityFields { - if value { - entityFieldFoundCount += 1 - } - } - if entityFieldsShouldMiss { - assert.Equal(t, 0, entityFieldFoundCount) - } else { - assert.Equal(t, 5, entityFieldFoundCount) - } -} - -func getLogQueryId(logClient *cloudwatchlogs.Client, logGroup string, since, until *time.Time) (*string, error) { - var queryId *string - params := &cloudwatchlogs.StartQueryInput{ - QueryString: aws.String("fields @message, @entity.KeyAttributes.Type, @entity.KeyAttributes.Name, @entity.KeyAttributes.Environment, @entity.Attributes.PlatformType, @entity.Attributes.EC2.InstanceId"), - LogGroupName: aws.String(logGroup), - } - if since != nil { - params.StartTime = aws.Int64(since.UnixMilli()) - } - if until != nil { - params.EndTime = aws.Int64(until.UnixMilli()) - } - attempts := 0 - - for { - output, err := logClient.StartQuery(context.Background(), params) - attempts += 1 - - if err != nil { - if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. 
- return queryId, err - } - queryId = output.QueryId - return queryId, err - } -} - -func getQueryResult(logClient *cloudwatchlogs.Client, queryId *string) ([][]cwlTypes.ResultField, error) { - attempts := 0 - var results [][]cwlTypes.ResultField - params := &cloudwatchlogs.GetQueryResultsInput{ - QueryId: aws.String(*queryId), - } - for { - if attempts > awsservice.StandardRetries { - return results, errors.New("exceeded retry count") - } - result, err := logClient.GetQueryResults(context.Background(), params) - log.Printf("GetQueryResult status is: %v", result.Status) - attempts += 1 - if result.Status != cwlTypes.QueryStatusComplete { - log.Printf("GetQueryResult: sleeping for 5 seconds until status is complete") - time.Sleep(5 * time.Second) - continue - } - log.Printf("GetQueryResult: result length is %d", len(result.Results)) - if err != nil { - if errors.As(err, &rnf) { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. - return results, err - } - results = result.Results - return results, err - } -} - -func getLogGroup(logClient *cloudwatchlogs.Client) ([]cwlTypes.LogGroup, error) { - attempts := 0 - var logGroups []cwlTypes.LogGroup - params := &cloudwatchlogs.DescribeLogGroupsInput{} - for { - output, err := logClient.DescribeLogGroups(context.Background(), params) - - attempts += 1 - - if err != nil { - if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. - return logGroups, err - } - logGroups = output.LogGroups - return logGroups, err - } -} - -// DeleteLogGroupAndStream cleans up a log group and stream by name. 
This gracefully handles -// ResourceNotFoundException errors from calling the APIs -func DeleteLogGroupAndStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { - DeleteLogStream(logClient, logGroupName, logStreamName) - DeleteLogGroup(logClient, logGroupName) -} - -// DeleteLogStream cleans up log stream by name -func DeleteLogStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { - _, err := logClient.DeleteLogStream(context.TODO(), &cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String(logGroupName), - LogStreamName: aws.String(logStreamName), - }) - if err != nil && !errors.As(err, &rnf) { - log.Printf("Error occurred while deleting log stream %s: %v", logStreamName, err) - } -} - -// DeleteLogGroup cleans up log group by name -func DeleteLogGroup(logClient *cloudwatchlogs.Client, logGroupName string) { - _, err := logClient.DeleteLogGroup(context.TODO(), &cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String(logGroupName), - }) - if err != nil && !errors.As(err, &rnf) { - log.Printf("Error occurred while deleting log group %s: %v", logGroupName, err) - } -} diff --git a/test/compass/resources/compass_default_log.json b/test/compass/resources/compass_default_log.json deleted file mode 100644 index a4b3c40c35..0000000000 --- a/test/compass/resources/compass_default_log.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true - }, - "logs": { - "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", - "force_flush_interval": 1, - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC" - } - ] - } - } - } -} diff --git a/test/compass/resources/compass_role_arn_check.json b/test/compass/resources/compass_role_arn_check.json deleted file mode 100644 index 90b1eaaabb..0000000000 --- 
a/test/compass/resources/compass_role_arn_check.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true, - "credentials": { - "role_arn": "{integ-test-role-arn}" - } - }, - "logs": { - "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC" - } - ] - } - } - } -} diff --git a/test/compass/resources/compass_service_in_config.json b/test/compass/resources/compass_service_in_config.json deleted file mode 100644 index e66a91f9a3..0000000000 --- a/test/compass/resources/compass_service_in_config.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true - }, - "logs": { - "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC", - "service.name": "compass-service", - "deployment.environment": "compass-environment" - } - ] - } - } - } -} diff --git a/test/go.mod b/test/go.mod deleted file mode 100644 index fc4978f595..0000000000 --- a/test/go.mod +++ /dev/null @@ -1,58 +0,0 @@ -module github.com/aws/private-amazon-cloudwatch-agent-staging/test - -go 1.22.4 - -require ( - github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc - github.com/aws/aws-sdk-go-v2 v1.23.5 - github.com/aws/aws-sdk-go-v2/config v1.25.11 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 - github.com/stretchr/testify v1.8.4 -) - -require ( - collectd.org v0.5.0 // indirect - github.com/DataDog/datadog-go v4.8.3+incompatible // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect - 
github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect - github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 // indirect - github.com/aws/smithy-go v1.18.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/google/uuid v1.4.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - 
github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prozz/aws-embedded-metrics-golang v1.2.0 // indirect - github.com/qri-io/jsonpointer v0.1.1 // indirect - github.com/qri-io/jsonschema v0.2.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/tools v0.16.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/go.sum b/test/go.sum deleted file mode 100644 index fa1e4997a1..0000000000 --- a/test/go.sum +++ /dev/null @@ -1,119 +0,0 @@ -collectd.org v0.5.0 h1:mRTLdljvxJNXPMMO9RSxf0PANDAqu/Tz+I6Dt6OjB28= -collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= -github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc h1:oC0cgVlspqNbwRKk9Zk9zweYKZcjnW48Hwp0isLh1Co= -github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc/go.mod h1:E/w/idAjJTY+laomuWIO8wCE8Rtq3hSA2sVeNeV+YGA= -github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= -github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 h1:Zx9+31KyB8wQna6SXFWOewlgoY5uGdDAu6PTOEU3OQI= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3/go.mod h1:zxbEJhRdKTH1nqS2qu6UJ7zGe25xaHxZXaC2CvuQFnA= -github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno= -github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod 
h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 h1:/KXnrU9g/RzJwJKuZ7G635w9segJCpg9OIwkjPYZs7g= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9/go.mod h1:i6u5850nH0SFslKYMUVLW8Uc+JgEdpx4XHNA7T1S2C0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 h1:TUCNKBd4/JEefsZDxo5deRmrRRPZHqGyBYiUAeBKOWU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4/go.mod h1:egDkcl+zsgFqS6VO142bKboip5Pe1sNMwN55Xy38QsM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 h1:abKT+RuM1sdCNZIGIfZpLkvxEX3Rpsto019XG/rkYG8= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8/go.mod h1:Owc4ysUE71JSruVTTa3h4f2pp3E4hlcAtmeNXxDmjj4= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 h1:Eub6qmSRH5ahS1zhVLa1i1qT3raC9Sxrn2kgtG19J3I= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0/go.mod h1:ehWDbgXo5Zy6eLjP+xX+Vf8wXaSyLGeRf6KlvoVAaXk= 
-github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 h1:HWB+RXvOQQkhEp8QCpTlgullbCiysRQlo6ulVZRBBtM= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2/go.mod h1:YHhAfr9Qd5xd0fLT2B7LxDFWbIZ6RbaI81Hu2ASCiTY= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 h1:pq1AgSc6YRDkT3/iuXgPUPL0ArmdEmjPoAl0YEJZ4d4= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2/go.mod h1:ZGxc+lOwUVsyeKrneIf8/hhowNgyqvCcwmLU/Hrscbk= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 h1:Ytz7+VR04GK7wF1C+yQScMZ4Q01xeL4EbQ4kOQ8HY1c= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3/go.mod h1:qqiIi0EbEEovHG/nQXYGAXcVvHPaUg7KMwh3VARzQz4= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 h1:/zmckWK6/SL9MTnCD8p2vOEmOT+LFQtXeoo/bTRBa3c= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2/go.mod h1:Wkk+2ZcFVCqnuf/yXjvSlySsoy5l2RSFfv/ikosEv3M= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 h1:e3Imv1oXz+W3Tfclflkh72t5TUPUwWdkHP7ctQGk8Dc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2/go.mod h1:d1hAqgLDOPaSO1Piy/0bBmj6oAplFwv6p0cquHntNHM= -github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 h1:yIr1T8uPhZT2cKCBeO39utfzG/RKJn3SxbuBOdj18Nc= -github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2/go.mod h1:MvDz+yXfa2sSEfHB57rdf83deKJIeKEopqHFhVmaRlk= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 h1:xyfOAYV/ujzZOo01H9+OnyeiRKmTEp6EsITTsmq332Q= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8/go.mod h1:coLeQEoKzW9ViTL2bn0YUlU7K0RYjivKudG74gtd+sI= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 h1:Vn/qqsXxe3JEALfoU6ypVt86fb811wKqv4kdxvAUk/Q= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9/go.mod 
h1:TQYzeHkuQrsz/AsxxK96CYJO4KRd4E6QozqktOR2h3w= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 h1:ip5ia3JOXl4OAsqeTdrOOmqKgoWiu+t9XSOnRzBwmRs= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8/go.mod h1:kE+aERnK9VQIw1vrk7ElAvhCsgLNzGyCPNg2Qe4Eq4c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 h1:DLSAG8zpJV2pYsU+UPkj1IEZghyBnnUsvIRs6UuXSDU= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2/go.mod h1:thjZng67jGsvMyVZnSxlcqKyLwB0XTG8bHIRZPTJ+Bs= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 h1:lmdmYCvG1EJKGLEsUsYDNO6MwZyBZROrRg04Vrb5TwA= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2/go.mod h1:pHJ1md/3F3WkYfZ4JKOllPfXQi4NiWk7NxbeOD53HQc= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 h1:mFHM/R2FYnCkmUB52SqJncU5TWDCfI55uXlNTp96g3Y= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.2/go.mod h1:zz5H6SRVFHj93yt3lxA8Ql63c/pY90YjNvvalulrCTk= -github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= -github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/cenkalti/backoff/v4 v4.2.1 
h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/kinbiko/jsonassert v1.0.1 h1:8gdLmUaPWuxk2TzQSofKRqatFH6zwTF6AsUH4bugJYY= -github.com/kinbiko/jsonassert v1.0.1/go.mod h1:QRwBwiAsrcJpjw+L+Q4WS8psLxuUY+HylVZS/4j74TM= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prozz/aws-embedded-metrics-golang v1.2.0 h1:b/LFb8J9LbgANow/9nYZE3M3bkb457/dj0zAB3hPyvo= -github.com/prozz/aws-embedded-metrics-golang v1.2.0/go.mod h1:MXOqF9cJCEHjj77LWq7NWK44/AOyaFzwmcAYqR3057M= -github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= -github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= -github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= -github.com/qri-io/jsonschema 
v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= -golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From cd09428c8e9a0fb444907c68aff85f08fbc6775c Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 3 Sep 2024 12:52:32 -0400 Subject: [PATCH 54/55] changes from project name to entity --- Makefile | 5 ----- .../{compass_linux_config.conf => entity_linux_config.conf} | 0 .../{compass_linux_config.json => entity_linux_config.json} | 0 .../{compass_linux_config.yaml => entity_linux_config.yaml} | 0 translator/tocwconfig/tocwconfig_test.go | 6 +++--- 5 files changed, 3 insertions(+), 8 deletions(-) rename translator/tocwconfig/sampleConfig/{compass_linux_config.conf => entity_linux_config.conf} (100%) rename translator/tocwconfig/sampleConfig/{compass_linux_config.json => entity_linux_config.json} (100%) rename translator/tocwconfig/sampleConfig/{compass_linux_config.yaml => entity_linux_config.yaml} (100%) diff --git a/Makefile b/Makefile index 8554c670eb..21b2fe7d88 100644 --- a/Makefile +++ b/Makefile @@ -116,11 +116,6 @@ build-for-docker-windows-amd64: $(WIN_BUILD)/start-amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent $(WIN_BUILD)/config-translator.exe github.com/aws/amazon-cloudwatch-agent/cmd/config-translator -build-for-docker-windows-amd64: - $(WIN_BUILD)/amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent - $(WIN_BUILD)/start-amazon-cloudwatch-agent.exe github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent - $(WIN_BUILD)/config-translator.exe 
github.com/aws/amazon-cloudwatch-agent/cmd/config-translator - build-for-docker-arm64: $(LINUX_ARM64_BUILD)/amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/amazon-cloudwatch-agent $(LINUX_ARM64_BUILD)/start-amazon-cloudwatch-agent github.com/aws/amazon-cloudwatch-agent/cmd/start-amazon-cloudwatch-agent diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/entity_linux_config.conf similarity index 100% rename from translator/tocwconfig/sampleConfig/compass_linux_config.conf rename to translator/tocwconfig/sampleConfig/entity_linux_config.conf diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.json b/translator/tocwconfig/sampleConfig/entity_linux_config.json similarity index 100% rename from translator/tocwconfig/sampleConfig/compass_linux_config.json rename to translator/tocwconfig/sampleConfig/entity_linux_config.json diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/entity_linux_config.yaml similarity index 100% rename from translator/tocwconfig/sampleConfig/compass_linux_config.yaml rename to translator/tocwconfig/sampleConfig/entity_linux_config.yaml diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index 997d425e7f..285f579529 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -152,7 +152,7 @@ func TestAppSignalsAndNativeKubernetesConfig(t *testing.T) { checkTranslation(t, "appsignals_and_k8s_config", "windows", expectedEnvVars, "") } -func TestCompassConfig(t *testing.T) { +func TestEntityConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) @@ -161,8 +161,8 @@ func TestCompassConfig(t *testing.T) { t.Setenv(config.HOST_NAME, "host_name_from_env") t.Setenv(config.HOST_IP, "127.0.0.1") - checkTranslation(t, "compass_linux_config", "linux", nil, "") - checkTranslation(t, 
"compass_linux_config", "darwin", nil, "") + checkTranslation(t, "entity_linux_config", "linux", nil, "") + checkTranslation(t, "entity_linux_config", "darwin", nil, "") } func TestEmfAndKubernetesConfig(t *testing.T) { From 3606ecacbbdf937ce286723779c9037e5846ac2b Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 3 Sep 2024 15:50:22 -0400 Subject: [PATCH 55/55] removes some duplicate lines, region parameter in pusher --- plugins/outputs/cloudwatchlogs/cloudwatchlogs.go | 2 +- plugins/outputs/cloudwatchlogs/pusher.go | 4 +--- plugins/outputs/cloudwatchlogs/pusher_test.go | 2 +- translator/translate/otel/exporter/awsemf/translator_test.go | 2 -- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index a75198f38d..680e63b23b 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -167,7 +167,7 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { c.Log.Info("Configured middleware on AWS client") } } - pusher := NewPusher(c.Region, t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, logSrc) + pusher := NewPusher(t, client, c.ForceFlushInterval.Duration, maxRetryTimeout, c.Log, c.pusherStopChan, &c.pusherWaitGroup, logSrc) cwd := &cwDest{pusher: pusher, retryer: logThrottleRetryer} c.cwDests[t] = cwd return cwd diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index d3d86466a9..10cc50a364 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -43,7 +43,6 @@ type pusher struct { RetryDuration time.Duration Log telegraf.Logger - region string logSrc logs.LogSrc events []*cloudwatchlogs.InputLogEvent minT, maxT *time.Time @@ -65,14 +64,13 @@ type pusher struct { wg *sync.WaitGroup } -func NewPusher(region string, target 
Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, logSrc logs.LogSrc) *pusher { +func NewPusher(target Target, service CloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, logger telegraf.Logger, stop <-chan struct{}, wg *sync.WaitGroup, logSrc logs.LogSrc) *pusher { p := &pusher{ Target: target, Service: service, FlushTimeout: flushTimeout, RetryDuration: retryDuration, Log: logger, - region: region, logSrc: logSrc, events: make([]*cloudwatchlogs.InputLogEvent, 0, 10), eventsCh: make(chan logs.LogEvent, 100), diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index 40357d23ec..e4170b4492 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -800,6 +800,6 @@ func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { func testPreparation(retention int, s *svcMock, flushTimeout time.Duration, retryDuration time.Duration) (chan struct{}, *pusher) { stop := make(chan struct{}) mockLogSrcObj := &mockLogSrc{} - p := NewPusher("us-east-1", Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, mockLogSrcObj) + p := NewPusher(Target{"G", "S", util.StandardLogGroupClass, retention}, s, flushTimeout, retryDuration, models.NewLogger("cloudwatchlogs", "test", ""), stop, &wg, mockLogSrcObj) return stop, p } diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 0d390aa30d..10e7d08280 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -30,8 +30,6 @@ func TestTranslator(t *testing.T) { agent.Global_Config.Region = "us-east-1" agent.Global_Config.Role_arn = 
"global_arn" tt := NewTranslator() - agent.Global_Config.Region = "us-east-1" - agent.Global_Config.Role_arn = "global_arn" require.EqualValues(t, "awsemf", tt.ID().String()) testCases := map[string]struct { env map[string]string