diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9152d81..24204f6 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,28 +1,54 @@
 # Change Log
+
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [2.5.0] - 2023-09-15
+
+### Added
+
+- Added support for transferring ECR assets without tags #118
+- Users can now list stopped tasks and clone tasks from the stopped ones #117
+
+### Changed
+
+- Enhanced transfer performance by utilizing cluster capabilities to transfer large files through multipart upload in parallel #116
+- Added automatic restart functionality for the Worker CLI #109
+- Enabled IMDSv2 by default for Auto Scaling Groups (ASG)
+- Switched to using Launch Templates instead of Launch Configurations to launch ASG instances #115
+- Automatically create a CloudFront invalidation after the upgrade #52
+
 ## [2.4.0] - 2023-04-28
+
 ### Added
+
 - Support for requester pay mode in S3 transfer task.
 
 ## [2.3.1] - 2023-04-18
+
 ### Fixed
+
 - Fix deployment failure due to S3 ACL changes.
 
 ## [2.3.0] - 2023-03-30
+
 ### Added
+
 - Support embedded dashboard and logs
 - Support S3 Access Key Rotation
 - Enhance One Time Transfer Task monitoring
 
 ## [1.0.1] - 2022-09-07
+
 ### Fixed
+
 - Upgrade lambda runtime to python v3.7.0
 - Fix the list limit of secrets
 
 ## [1.0.0] - 2021-12-22
+
 ### Added
+
 - All files, initial version
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c853f2b..52eaad7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,7 +11,7 @@ information to effectively respond to your bug report or contribution.
 
 We welcome you to use the GitHub issue tracker to report bugs or suggest features.
 
-When filing an issue, please check [existing open](https://github.com/awslabs/data-transfer-hub/issues), or [recently closed](https://github.com/awslabs/data-transfer-hub/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+When filing an issue, please check [existing open](https://github.com/awslabs/data-transfer-hub/issues), or [recently closed](https://github.com/awslabs/data-transfer-hub/issues?q=is%3Aissue+is%3Aclosed), issues to make sure somebody else hasn't already
 reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
 
 * A reproducible test case or series of steps
@@ -42,7 +42,7 @@ GitHub provides additional document on [forking a repository](https://help.githu
 
 ## Finding contributions to work on
 
-Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-data-replication-hub/labels/help%20wanted) issues is a great place to start.
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/data-transfer-hub/labels/help%20wanted) issues is a great place to start.
 ## Code of Conduct
@@ -56,7 +56,6 @@ If you discover a potential security issue in this project we ask that you notif
 
 ## Licensing
-
 See the [LICENSE](https://github.com/awslabs/data-transfer-hub/blob/main/LICENSE.txt) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
 
-We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
\ No newline at end of file
diff --git a/NOTICE.txt b/NOTICE.txt
index a11eb0e..89caf9e 100755
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1,5 +1,11 @@
 Data Transfer Hub
 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except
+in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/
+or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
 **********************
 THIRD PARTY COMPONENTS
@@ -82,4 +88,55 @@ eslint-plugin-import under the Massachusetts Institute of Technology (MIT) licen
 eslint-plugin-n under the Massachusetts Institute of Technology (MIT) license
 eslint-plugin-prettier under the Massachusetts Institute of Technology (MIT) license
 eslint-plugin-react under the Massachusetts Institute of Technology (MIT) license
-prettier under the Massachusetts Institute of Technology (MIT) license
\ No newline at end of file
+prettier under the Massachusetts Institute of Technology (MIT) license
+attrs under MIT
+boto3 under Apache License 2.0
+botocore under Apache License 2.0
+cffi under MIT
+coverage under Apache License 2.0
+defusedxml under PSF
+docker under Apache License 2.0
+filelock under Apache License 2.0
+flake8 under MIT
+iniconfig under MIT
+jmespath under MIT
+jsonschema under MIT
+jsonschema-spec under MIT
+jsonschema-specifications under MIT
+mccabe under MIT
+moto under Apache License 2.0
+openapi-spec-validator under MIT
+pluggy under MIT
+py-serializable under Apache License 2.0
+pycodestyle under MIT
+pyflakes under MIT
+pytest under MIT
+pytest-cov under MIT
+pytz under MIT
+referencing under MIT
+responses under Apache License 2.0
+rfc3339-validator under MIT
+rpds-py under MIT
+s3transfer under Apache License 2.0
+tomli under MIT
+types-PyYAML under MIT
+websocket-client under LGPL
+xmltodict under MIT
+exceptiongroup under the MIT License
+moment under the MIT License
+@testing-library/react under the MIT License
+@aws-amplify/ui-react under the Apache License Version 2.0
+@aws-amplify/ui-components under the Apache License Version 2.0
+fs under the MIT License
+Jinja2 under the BSD-3-Clause
+MarkupSafe under the BSD-3-Clause
+Werkzeug under the BSD-3-Clause
+cryptography under the Apache License Version 2.0 or BSD-3-Clause
+lazy-object-proxy under the BSD-2-Clause
+openapi-schema-validator under the BSD-3-Clause
+pathable under the Apache License Version 2.0 (details in this link: https://github.com/p1c2u/pathable/blob/master/LICENSE)
+pycparser under the BSD License
+python-dateutil under Apache Software License, BSD License (Dual License)
+typing_extensions under Python Software Foundation License
+boolean.py under BSD-2-Clause
+license-expression under Apache License Version 2.0
\ No newline at end of file
diff --git a/README.md b/README.md
index 17f405c..a5ae589 100644
--- a/README.md
+++ b/README.md
@@ -52,8 +52,8 @@ invokes the CloudFormation API to deploy another stack to provision all resource
 only provision resources when needed. You can also choose to deploy the data transfer plugin independently.
 
 Available Plugins:
-* [S3 Plugin](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin)
-* [ECR Plugin](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin)
+* [S3 Plugin](./docs/S3_PLUGIN.md)
+* [ECR Plugin](./docs/ECR_PLUGIN.md)
 
 ## Solution Deployment
 
@@ -110,5 +110,5 @@ of Account A.
 
 * [How to customize this solution and build your own distributable?](./docs/build-your-own-distributable.md)
 * [Deploy this solution via AWS CDK](./docs/deploy-via-cdk.md)
-* [Data Transfer Hub S3 Plugin](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin)
-* [Data Transfer Hub ECR Plugin](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin)
\ No newline at end of file
+* [Data Transfer Hub S3 Plugin](./docs/S3_PLUGIN.md)
+* [Data Transfer Hub ECR Plugin](./docs/ECR_PLUGIN.md)
\ No newline at end of file
diff --git a/deployment/build-s3-dist.sh b/deployment/build-s3-dist.sh
index e2048ed..7da503a 100755
--- a/deployment/build-s3-dist.sh
+++ b/deployment/build-s3-dist.sh
@@ -3,6 +3,11 @@
 # This script packages your project into a solution distributable that can be
 # used as an input to the solution builder validation pipeline.
 #
+# Important notes and prerequisites:
+# 1. The initialize-repo.sh script must have been run in order for this script to
+#    function properly.
+# 2. This script should be run from the repo's /deployment folder.
+#
 # This script will perform the following tasks:
 #   1. Remove any old dist files from previous runs.
 #   2. Install dependencies for the cdk-solution-helper; responsible for
@@ -20,11 +25,40 @@
 # The template will then expect the source code to be located in the solutions-[region_name] bucket
 # - solution-name: name of the solution for consistency
 # - version-code: version of the package
-set -e
+#-----------------------
+# Formatting
+bold=$(tput bold)
+normal=$(tput sgr0)
+#------------------------------------------------------------------------------
+# SETTINGS
+#------------------------------------------------------------------------------
+template_format="json"
+run_helper="true"
+
+# run_helper is false for yaml - not supported
+[[ $template_format == "yaml" ]] && {
+    run_helper="false"
+    echo "${bold}Solution_helper disabled:${normal} template format is yaml"
+}
 
-run() {
-    >&2 echo "[run] $*"
-    $*
+#------------------------------------------------------------------------------
+# DISABLE OVERRIDE WARNINGS
+#------------------------------------------------------------------------------
+# Use with care: disables the warning for overridden properties on
+# AWS Solutions Constructs
+export overrideWarningsEnabled=false
+
+#------------------------------------------------------------------------------
+# Build Functions
+#------------------------------------------------------------------------------
+# Echo, execute, and check the return code for a command. Exit if rc > 0
+# ex. do_cmd npm run build
+usage()
+{
+    echo "Usage: $0 bucket solution-name version"
+    echo "Please provide the base source bucket name, trademarked solution name, and version."
+ echo "For example: ./build-s3-dist.sh mybucket my-solution v1.0.0" + exit 1 } do_cmd() @@ -45,8 +79,49 @@ sedi() sed -i $* 2>/dev/null || sed -i "" $* } -## Important: CDK global version number -cdk_version=1.64.1 +do_replace() +{ + replace="s/$2/$3/g" + file=$1 + do_cmd sedi $replace $file +} + +create_template_json() +{ + # Run 'cdk synth' to generate raw solution outputs + do_cmd npx cdk synth --output=$staging_dist_dir + + # Remove unnecessary output files + do_cmd cd $staging_dist_dir + # ignore return code - can be non-zero if any of these does not exist + rm tree.json manifest.json cdk.out + + # Move outputs from staging to template_dist_dir + echo "Move outputs from staging to template_dist_dir" + do_cmd mv $staging_dist_dir/*.template.json $template_dist_dir/ + + # Rename all *.template.json files to *.template + echo "Rename all *.template.json to *.template" + echo "copy templates and rename" + for f in $template_dist_dir/*.template.json; do + mv -- "$f" "${f%.template.json}.template" + done +} + +create_template_yaml() +{ + # Assumes current working directory is where the CDK is defined + # Output YAML - this is currently the only way to do this for multiple templates + maxrc=0 + for template in `cdk list`; do + echo Create template $template + npx cdk synth $template > ${template_dist_dir}/${template}.template + if [[ $? > $maxrc ]]; then + maxrc=$? + fi + done +} + cleanup_temporary_generted_files() { echo "------------------------------------------------------------------------------" @@ -60,28 +135,89 @@ cleanup_temporary_generted_files() # Delete the temporary /staging folder do_cmd rm -rf $staging_dist_dir } + +fn_exists() +{ + exists=`LC_ALL=C type $1` + return $? +} + +#------------------------------------------------------------------------------ +# INITIALIZATION +#------------------------------------------------------------------------------ +# solution_config must exist in the deployment folder (same folder as this +# file) . It is the definitive source for solution ID, name, and trademarked +# name. # -# Check to see if the required parameters have been provided: -if [ -z "$1" ] || [ -z "$2" ]; then - echo "Please provide the base source bucket name, trademark approved solution name and version where the lambda code will eventually reside." - echo "For example: ./build-s3-dist.sh solutions trademarked-solution-name v1.0.0" +# Example: +# +# SOLUTION_ID='SO0111' +# SOLUTION_NAME='AWS Security Hub Automated Response & Remediation' +# SOLUTION_TRADEMARKEDNAME='aws-security-hub-automated-response-and-remediation' +# SOLUTION_VERSION='v1.1.1' # optional +if [[ -e './solution_config' ]]; then + source ./solution_config +else + echo "solution_config is missing from the solution root." 
+    exit 1
+fi
+
+if [[ -z $SOLUTION_ID ]]; then
+    echo "SOLUTION_ID is missing from ./solution_config"
+    exit 1
+else
+    export SOLUTION_ID
+fi
+
+if [[ -z $SOLUTION_NAME ]]; then
+    echo "SOLUTION_NAME is missing from ./solution_config"
+    exit 1
+else
+    export SOLUTION_NAME
+fi
+
+if [[ -z $SOLUTION_TRADEMARKEDNAME ]]; then
+    echo "SOLUTION_TRADEMARKEDNAME is missing from ./solution_config"
+    exit 1
+else
+    export SOLUTION_TRADEMARKEDNAME
+fi
+
+
+#------------------------------------------------------------------------------
+# Validate command line parameters
+#------------------------------------------------------------------------------
+# Validate command line input - must provide bucket
+[[ -z $1 ]] && { usage; exit 1; } || { SOLUTION_BUCKET=$1; }
+
+# Environment variables for use in CDK
+export DIST_OUTPUT_BUCKET=$SOLUTION_BUCKET
+
+# Version from the command line is definitive. Otherwise, use, in order of precedence:
+# - SOLUTION_VERSION from solution_config
+# - version.txt
+#
+# Note: Solutions Pipeline sends bucket, name, version. Command line expects bucket, version
+# if there is a 3rd parm then version is $3, else $2
+#
+# If confused, use: build-s3-dist.sh bucket solution-name version
 if [ ! -z $3 ]; then
     export VERSION="$3"
 else
     export VERSION=$(git describe --tags --exact-match || { [ -n "$BRANCH_NAME" ] && echo "$BRANCH_NAME"; } || echo v0.0.0)
 fi
 
+#-----------------------------------------------------------------------------------
 # Get reference for all important folders
-template_dir="$(cd "$(dirname $0)";pwd)"
+#-----------------------------------------------------------------------------------
+template_dir="$PWD"
 staging_dist_dir="$template_dir/staging"
 template_dist_dir="$template_dir/global-s3-assets"
 build_dist_dir="$template_dir/regional-s3-assets"
 source_dir="$template_dir/../source"
 
 echo "------------------------------------------------------------------------------"
-echo "[Init] Remove any old dist files from previous runs"
+echo "${bold}[Init] Remove any old dist files from previous runs${normal}"
 echo "------------------------------------------------------------------------------"
 
 do_cmd rm -rf $template_dist_dir
@@ -92,78 +228,74 @@ do_cmd rm -rf $staging_dist_dir
 do_cmd mkdir -p $staging_dist_dir
 
 echo "------------------------------------------------------------------------------"
-echo "[Init] Install dependencies for the cdk-solution-helper"
+echo "${bold}[Init] Install dependencies for the cdk-solution-helper${normal}"
 echo "------------------------------------------------------------------------------"
 
 do_cmd cd $template_dir/cdk-solution-helper
 do_cmd npm install
 
 echo "------------------------------------------------------------------------------"
-echo "[Build] Build web portal artifacts"
+echo "${bold}[Synth] CDK Project${normal}"
 echo "------------------------------------------------------------------------------"
-cd $source_dir/portal
-npm install --legacy-peer-deps
-npm run build
+# Installing and building the web console assets is handled in run-all-tests.sh
 
-echo "------------------------------------------------------------------------------"
-echo "[Synth] CDK Project"
-echo "------------------------------------------------------------------------------"
 # Install the global aws-cdk package
-#echo "npm install -g aws-cdk@$cdk_version"
-#npm install -g aws-cdk@$cdk_version
-
+# Note: do not install using global (-g) option. This makes build-s3-dist.sh difficult
+# for customers and developers to use, as it globally changes their environment.
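+#
+# Illustrative sketch only (not part of this script): a repo-local install
+# behaves like the following. The package and version are placeholders; the
+# real CDK version is whatever source/constructs/package.json pins.
+#
+#   npm install aws-cdk --no-save   # installs into ./node_modules only
+#   npx cdk --version               # npx resolves the local copy, not a global one
+#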
+do_cmd cd $source_dir/constructs
+do_cmd npm install
 
-# Run 'npm run build && cdk synth' to generate raw solution outputs
-echo "cd $source_dir/constructs"
-cd $source_dir/constructs
-echo "npm install"
-npm install
-echo "npm run build"
-npm run build
+# Add local install to PATH
+export PATH=$(npm bin):$PATH
+do_cmd npm run build    # build javascript from typescript to validate the code
+                        # cdk synth doesn't always detect issues in the typescript
+                        # and may succeed using old build files. This ensures we
+                        # have fresh javascript from a successful build
 
-run npx cdk synth --output=$staging_dist_dir --json true > $template_dist_dir/DataTransferHub-cognito.template
-run npx cdk synth -c authType=openid --output=$staging_dist_dir --json true > $template_dist_dir/DataTransferHub-openid.template
-ls -l $template_dist_dir
+echo "------------------------------------------------------------------------------"
+echo "${bold}[Create] Templates${normal}"
+echo "------------------------------------------------------------------------------"
 
-# Remove unnecessary output files
-echo "cd $staging_dist_dir"
-cd $staging_dist_dir
-echo "rm tree.json manifest.json cdk.out"
-rm tree.json manifest.json cdk.out
+if fn_exists create_template_${template_format}; then
+    create_template_${template_format}
+else
+    echo "Invalid setting for \$template_format: $template_format"
+    exit 255
+fi
 
 echo "------------------------------------------------------------------------------"
-echo "[Packing] Template artifacts"
+echo "${bold}[Packing] Template artifacts${normal}"
 echo "------------------------------------------------------------------------------"
 
 # Run the helper to clean-up the templates and remove unnecessary CDK elements
-cd $template_dir/cdk-solution-helper
 echo "Run the helper to clean-up the templates and remove unnecessary CDK elements"
-echo "node $template_dir/cdk-solution-helper/index"
-node $template_dir/cdk-solution-helper/index
-if [ "$?" = "1" ]; then
-    echo "(cdk-solution-helper) ERROR: there is likely output above." 1>&2
-    exit 1
-fi
+[[ $run_helper == "true" ]] && {
+    echo "node $template_dir/cdk-solution-helper/index"
+    node $template_dir/cdk-solution-helper/index
+    if [ "$?" = "1" ]; then
+        echo "(cdk-solution-helper) ERROR: there is likely output above." 1>&2
+        exit 1
+    fi
+} || echo "${bold}Solution Helper skipped: ${normal}run_helper=false"
 
 # Find and replace bucket_name, solution_name, and version
 echo "Find and replace bucket_name, solution_name, and version"
 cd $template_dist_dir
-echo "Updating code source bucket in template with $1"
-replace="s/%%BUCKET_NAME%%/$1/g"
-run sedi $replace $template_dist_dir/*.template
-replace="s/%%SOLUTION_NAME%%/$2/g"
-run sedi $replace $template_dist_dir/*.template
-replace="s/%%VERSION%%/$VERSION/g"
-run sedi $replace $template_dist_dir/*.template
-
+do_replace "*.template" %%BUCKET_NAME%% ${SOLUTION_BUCKET}
+do_replace "*.template" %%SOLUTION_NAME%% ${SOLUTION_TRADEMARKEDNAME}
+do_replace "*.template" %%VERSION%% ${VERSION}
 
 echo "------------------------------------------------------------------------------"
-echo "[Packing] Source code artifacts"
+echo "${bold}[Packing] Source code artifacts${normal}"
 echo "------------------------------------------------------------------------------"
 
+# General cleanup of node_modules files
+echo "find $staging_dist_dir -iname "node_modules" -type d -exec rm -rf "{}" \; 2> /dev/null"
+find $staging_dist_dir -iname "node_modules" -type d -exec rm -rf "{}" \; 2> /dev/null
+
 # ... For each asset.* source code artifact in the temporary /staging folder...
 cd $staging_dist_dir
 for d in `find . -mindepth 1 -maxdepth 1 -type d`; do
@@ -209,5 +341,5 @@ done
 # cleanup temporary generated files that are not needed for later stages of the build pipeline
 cleanup_temporary_generted_files
 
-
-
+# Return to original directory from when we started the build
+cd $template_dir
diff --git a/deployment/cdk-solution-helper/index.js b/deployment/cdk-solution-helper/index.js
index 425ab82..fc90c9a 100755
--- a/deployment/cdk-solution-helper/index.js
+++ b/deployment/cdk-solution-helper/index.js
@@ -1,18 +1,9 @@
-/**
- * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
- * with the License. A copy of the License is located at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
- * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- */
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
 
 // Imports
 const fs = require('fs');
+const _regex = /[\w]*AssetParameters/g; // this regular expression also takes into account lambda functions defined in nested stacks
 
 // Paths
 const global_s3_assets = '../global-s3-assets';
@@ -104,7 +95,10 @@ fs.readdirSync(global_s3_assets).forEach(file => {
   // Clean-up parameters section
   const parameters = (template.Parameters) ? template.Parameters : {};
   const assetParameters = Object.keys(parameters).filter(function (key) {
-    return key.includes('AssetParameters');
+    if (key.search(_regex) > -1) {
+      return true;
+    }
+    return false;
   });
   assetParameters.forEach(function (a) {
     template.Parameters[a] = undefined;
@@ -121,7 +115,8 @@ fs.readdirSync(global_s3_assets).forEach(file => {
     rules.CheckBootstrapVersion = undefined
   }
 
+  // Output modified template file
   const output_template = JSON.stringify(template, null, 2);
   fs.writeFileSync(`${global_s3_assets}/${file}`, output_template);
-});
\ No newline at end of file
+});
diff --git a/deployment/cdk-solution-helper/package.json b/deployment/cdk-solution-helper/package.json
index 89fac67..852b2bb 100755
--- a/deployment/cdk-solution-helper/package.json
+++ b/deployment/cdk-solution-helper/package.json
@@ -1,6 +1,12 @@
 {
   "name": "cdk-solution-helper",
-  "version": "0.1.0",
+  "description": "cdk solution helper",
+  "version": "2.5.0",
+  "license": "Apache-2.0",
+  "author": {
+    "name": "Amazon Web Services",
+    "url": "https://aws.amazon.com/solutions"
+  },
   "devDependencies": {
     "fs": "0.0.1-security"
   },
diff --git a/deployment/run-unit-tests.sh b/deployment/run-unit-tests.sh
new file mode 100644
index 0000000..9cc852a
--- /dev/null
+++ b/deployment/run-unit-tests.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# You can remove this script if you do NOT have unit tests.
+#
+# This script should be run from the repo's deployment directory
+# cd deployment
+# ./run-unit-tests.sh
+#
+source_template_dir="$PWD"
+cd $source_template_dir/../source/constructs
+./run-all-tests.sh
\ No newline at end of file
diff --git a/docs/CUSTOM_BUILD.md b/docs/CUSTOM_BUILD.md
new file mode 100644
index 0000000..fe874b6
--- /dev/null
+++ b/docs/CUSTOM_BUILD.md
@@ -0,0 +1,49 @@
+# Create custom build
+
+The solution can be deployed through the CloudFormation template available on the solution home page.
+To make changes to the solution, download or clone this repo, update the source code, and then run the deployment/build-s3-dist.sh script to deploy the updated code to an Amazon S3 bucket in your account.
+
+## Prerequisites:
+* [AWS Command Line Interface](https://aws.amazon.com/cli/)
+* Node.js 18.x or later
+
+## 1. Clone the repository
+
+## 2. Run unit tests for customization
+Run unit tests to make sure added customization passes the tests:
+
+```bash
+chmod +x ./run-unit-tests.sh
+./run-unit-tests.sh
+```
+
+## 3. Declare environment variables
+```bash
+export REGION=aws-region-code # the AWS region to launch the solution (e.g. us-east-1)
+export DIST_OUTPUT_BUCKET=my-bucket-name # bucket where customized code will reside
+export SOLUTION_NAME=my-solution-name # the solution name
+export VERSION=my-version # version number for the customized code
+```
+
+## 4. Create an Amazon S3 Bucket
+The CloudFormation template is configured to pull the Lambda deployment packages from Amazon S3 buckets in the Region where the template is launched. Use the commands below to create the buckets.
+
+```bash
+aws s3 mb s3://$DIST_OUTPUT_BUCKET --region $REGION
+aws s3 mb s3://$DIST_OUTPUT_BUCKET-$REGION --region $REGION
+```
+
+## 5. Create the deployment packages
+Build the distributable:
+```bash
+chmod +x ./build-s3-dist.sh
+./build-s3-dist.sh $DIST_OUTPUT_BUCKET $SOLUTION_NAME $VERSION $REGION
+```
+
+## 6. Deploy the distributable
+
+Deploy the distributable to the Amazon S3 bucket in your account:
+```bash
+aws s3 cp ./global-s3-assets/ s3://$DIST_OUTPUT_BUCKET/$SOLUTION_NAME/$VERSION/ --recursive --acl bucket-owner-full-control
+aws s3 cp ./regional-s3-assets/ s3://$DIST_OUTPUT_BUCKET-$REGION/$SOLUTION_NAME/$VERSION/ --recursive --acl bucket-owner-full-control
+```
\ No newline at end of file
diff --git a/docs/ECR_DEPLOYMENT_CN.md b/docs/ECR_DEPLOYMENT_CN.md
new file mode 100644
index 0000000..b29a79c
--- /dev/null
+++ b/docs/ECR_DEPLOYMENT_CN.md
@@ -0,0 +1,76 @@
+
+[English](./ECR_DEPLOYMENT_EN.md)
+
+# 部署指南
+
+
+> 注意:如果您已经部署了 Data Transfer Hub 控制台,请直接参考[通过控制台创建Amazon ECR传输任务](https://awslabs.github.io/data-transfer-hub/zh/user-guide/tutorial-ecr/)。
+
+> 本教程是纯后端版本的部署指南。
+
+## 1. 准备VPC (可选)
+
+此解决方案可以部署在公有和私有子网中。建议使用公有子网。
+
+- 如果您想使用现有的 VPC,请确保 VPC 至少有 2 个子网,并且两个子网都必须具有公网访问权限(带有 Internet 网关的公有子网或带有 NAT 网关的私有子网)
+
+- 如果您想为此解决方案创建新的默认 VPC,请转到步骤2,并确保您在创建集群时选择了*为此集群创建一个新的 VPC*。
+
+
+## 2. 配置ECS集群
+
+此方案需要ECS集群才能运行Fargate任务。
+
+打开AWS 管理控制台 > Elastic Container Service (ECS)。在 ECS集群首页上,单击 **创建集群**。
+
+步骤1:选择集群模版,确保选择 **仅限联网** 类型。
+
+步骤2:配置集群,指定集群名称,点击创建即可。如果您还想创建一个新的 VPC(仅限公有子网),还请选中**为此集群创建新的 VPC** 选项。
+
+![创建集群](./images/cluster_cn.png)
+
+
+
+## 3. 配置凭据
+
+如果源(或目标)不在当前的AWS账户中,则您需要提供`AccessKeyID`和`SecretAccessKey`(即`AK`/`SK`)以从Amazon ECR中拉取或推送镜像。Amazon Secrets Manager 用于以安全方式存储访问凭证。
+
+> 注意:如果源类型为“公共(Public)”,则无需提供源的访问凭证。
+
+打开AWS 管理控制台 > Secrets Manager。在 Secrets Manager 主页上,单击 **存储新的密钥**。对于密钥类型,请使用**其他类型的秘密**。对于键/值对,请将下面的 JSON 文本复制并粘贴到明文部分,并相应地将值更改为您的 AK/SK。
+
+```
+{
+  "access_key_id": "",
+  "secret_access_key": ""
+}
+```
+
+![密钥](./images/secret_cn.png)
+
+然后下一步指定密钥名称,最后一步点击创建。
+
+## 4. 启动AWS CloudFormation部署
+
+请按照以下步骤通过AWS CloudFormation部署此插件。
+
+1. 登录到AWS管理控制台,切换到将CloudFormation Stack部署到的区域。
+
+1. 单击以下按钮在该区域中启动CloudFormation堆栈。
+
+    - 部署到AWS中国北京和宁夏区
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferECRStack.template)
+
+    - 部署到AWS海外区
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferECRStack.template)
+
+
+1. 单击**下一步**。相应地为参数指定值。如果需要,请更改堆栈名称。
+
+1. 单击**下一步**。配置其他堆栈选项,例如标签(可选)。
+
+1. 单击**下一步**。查看并勾选确认,然后单击“创建堆栈”开始部署。
+
+部署预计用时3-5分钟。
\ No newline at end of file
diff --git a/docs/ECR_DEPLOYMENT_EN.md b/docs/ECR_DEPLOYMENT_EN.md
new file mode 100644
index 0000000..794ab96
--- /dev/null
+++ b/docs/ECR_DEPLOYMENT_EN.md
@@ -0,0 +1,75 @@
+
+[中文](./ECR_DEPLOYMENT_CN.md)
+
+# Deployment Guide
+
+> Note: If you have already deployed the Data Transfer Hub console, please refer directly to [Create Amazon ECR transfer task through Portal](https://awslabs.github.io/data-transfer-hub/en/user-guide/tutorial-ecr/).
+
+> This tutorial is a deployment guide for the backend-only version.
+
+## 1. Prepare VPC (optional)
+
+This solution can be deployed in both public and private subnets. Using public subnets is recommended.
+
+- If you want to use an existing VPC, please make sure the VPC has at least 2 subnets, and both subnets must have public internet access (either public subnets with an internet gateway or private subnets with a NAT gateway).
+
+- If you want to create a new default VPC for this solution, please go to Step 2 and make sure you have *Create a new VPC for this cluster* selected when you create the cluster.
+
+
+## 2. Set up ECS Cluster
+
+An ECS cluster is required for this solution to run the Fargate task.
+
+Go to AWS Management Console > Elastic Container Service (ECS). From the ECS cluster home page, click **Create Cluster**.
+
+Step 1: Select Cluster Template, and make sure you choose the **Network Only** type.
+
+Step 2: Configure the cluster: just specify a cluster name and click Create. If you also want to create a new VPC (public subnets only), please also check the **Create a new VPC for this cluster** option.
+
+![Create Cluster](./images/cluster_en.png)
+
+
+
+## 3. Configure credentials
+
+If the source (or destination) is NOT in the current AWS account, you will need to provide `AccessKeyID` and `SecretAccessKey` (namely `AK/SK`) to pull from or push to Amazon ECR, and Secrets Manager is used to store the credentials in a secure manner.
+
+> Note: If the source type is Public, there is no need to provide the source credentials.
+
+Go to AWS Management Console > Secrets Manager. From the Secrets Manager home page, click **Store a new secret**. For secret type, please use **Other type of secrets**. For key/value pairs, please copy and paste the JSON text below into the Plaintext section, and change the values to your AK/SK accordingly.
+
+```
+{
+  "access_key_id": "",
+  "secret_access_key": ""
+}
+```
+
+![Secret](./images/secret_en.png)
+
+Click Next to specify a secret name, and click Create in the last step.
+
+## 4. Launch AWS CloudFormation Stack
+
+Please follow the steps below to deploy this plugin via AWS CloudFormation.
+
+1. Sign in to AWS Management Console, and switch to the region to deploy the CloudFormation Stack to.
+
+1. Click the following button to launch the CloudFormation Stack in that region.
+
+    - For AWS China Regions
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferECRStack.template)
+
+
+    - For AWS Global regions
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferECRStack.template)
+
+1. Click **Next**. Specify values for the parameters accordingly. Change the stack name if required.
+
+1. Click **Next**. Configure additional stack options such as tags (optional).
+
+1. Click **Next**. Review and confirm the acknowledgement, then click **Create Stack** to start the deployment.
+
+The deployment will take approximately 3-5 minutes.
\ No newline at end of file
diff --git a/docs/ECR_PLUGIN.md b/docs/ECR_PLUGIN.md
new file mode 100644
index 0000000..6473052
--- /dev/null
+++ b/docs/ECR_PLUGIN.md
@@ -0,0 +1,83 @@
+
+[中文](./ECR_PLUGIN_CN.md)
+
+# Data Transfer Hub - ECR Plugin
+
+## Table of contents
+* [Introduction](#introduction)
+* [Architecture](#architecture)
+* [Deployment](#deployment)
+* [FAQ](#faq)
+  * [How to debug](#how-to-debug)
+  * [How to customize](#how-to-customize)
+
+## Introduction
+
+[Data Transfer Hub](https://github.com/awslabs/data-transfer-hub), a.k.a. Data Replication Hub, is a solution for transferring data from different sources into AWS. This project is for the ECR transfer plugin. You can deploy and run this plugin independently without the UI.
+
+The following are the planned features of this plugin.
+
+- Transfer Amazon ECR between AWS accounts or regions
+- Transfer Amazon ECR between AWS Standard partition and AWS CN partition
+- Transfer Public container image registry to AWS ECR
+- Transfer all images or only selected Images
+- Support One-time transfer
+- Support Incremental transfer
+
+This plugin uses [**skopeo**](https://github.com/containers/skopeo) as the tool to copy images to Amazon ECR. If the same layer already exists in the target ECR, it will not be copied again.
+
+
+## Architecture
+
+![ECR Plugin Architecture](ecr-plugin-architect.png)
+
+An EventBridge rule triggers the Step Functions workflow on a regular basis (by default, daily).
+
+Step Functions invokes Lambda to get the list of images from the source.
+
+Lambda will either list all the repositories in the source ECR or get the stored selected image list from System Manager Parameter Store.
+
+The transfer tasks run within Fargate with a max concurrency of 10. If a transfer task fails for some reason, it will automatically retry up to 3 times.
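+
+> As a hedged illustration of what one transfer boils down to (see the `skopeo copy` step below), a command of roughly the following shape copies a single image between registries. The registry URIs, repository name, `<account-id>`, and `<region>` are hypothetical placeholders, not values used by this plugin:
+
+```bash
+# Copy one image (all architectures of a multi-arch image) from a public
+# registry into a private Amazon ECR repository. <account-id> and <region>
+# are placeholders to substitute with real values.
+skopeo copy --all \
+  --dest-creds "AWS:$(aws ecr get-login-password --region <region>)" \
+  docker://quay.io/example/app:v1.0 \
+  docker://<account-id>.dkr.ecr.<region>.amazonaws.com/app:v1.0
+```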
+
+Each task uses `skopeo copy` to copy the images into the target ECR.
+
+Once the copy is completed, the status (either success or failed) will be logged into DynamoDB for tracking purposes.
+
+
+## Deployment
+
+Things to know about the deployment of this plugin:
+
+- The deployment will automatically provision resources such as Lambda functions, a DynamoDB table, and an ECS task definition in your AWS account.
+- The deployment will take approximately 3-5 minutes.
+- Once the deployment is completed, the data transfer task will start right away.
+
+Please follow the steps in the [Deployment Guide](./ECR_DEPLOYMENT_EN.md) to start the deployment.
+
+> Note: You can simply delete the stack from the CloudFormation console if the data transfer job is no longer required.
+
+## FAQ
+### How to debug
+
+**Q**: There seems to be something wrong, how do I debug?
+
+**A**: When you deploy the stack, you will be asked to input the stack name (default is DTHECRStack), and most of the resources will be created with the stack name as the name prefix. For example, the Step Functions state machine name will be in a format of `<StackName>-ECRReplicationSM`.
+
+There will be two main log groups created by this plugin.
+
+- /aws/lambda/<StackName>-ListImagesFunction<random suffix>
+
+This is the log group for the image-listing Lambda function. If there is no data transferred, you should check if something is wrong in the Lambda log. This is the first step.
+
+- <StackName>-DTHECRContainerLogGroup<random suffix>
+
+This is the log group for all ECS containers; the detailed transfer log can be found here.
+
+If you can't find anything helpful in the log groups, please raise an issue in GitHub.
+
+### How to customize
+
+**Q**: I want to make some custom changes, how do I do it?
+
+If you want to make custom changes to this plugin, you can follow the [custom build](CUSTOM_BUILD.md) guide.
+
+> Note: For more FAQs, please refer to [Implementation Guide - FAQ](https://awslabs.github.io/data-transfer-hub/en/faq/).
diff --git a/docs/ECR_PLUGIN_CN.md b/docs/ECR_PLUGIN_CN.md
new file mode 100644
index 0000000..65436cb
--- /dev/null
+++ b/docs/ECR_PLUGIN_CN.md
@@ -0,0 +1,80 @@
+[English](./ECR_PLUGIN.md)
+
+# Data Transfer Hub - ECR 插件
+
+## 目录
+* [简介](#简介)
+* [架构](#架构)
+* [部署](#部署)
+* [FAQ](#faq)
+  * [如何调试](#如何调试)
+  * [如何客制化](#如何客制化)
+
+## 简介
+
+[Data Transfer Hub](https://github.com/awslabs/data-transfer-hub),前称是Data Replication Hub,是一个用于从不同的源传输数据到AWS的解决方案。本项目是该方案的其中一款插件(ECR插件)。你可以独立部署和运行此插件而无需使用UI。
+
+以下是此插件的功能。
+
+- AWS账户或区域之间的Amazon ECR的传输
+- AWS Global区和AWS 中国区之间的Amazon ECR的传输
+- 公共容器镜像仓库到AWS ECR的传输
+- 传输所有镜像,或仅传输选定的镜像
+- 支持一次性传输
+- 支持增量传输
+
+该插件使用 [**skopeo**](https://github.com/containers/skopeo) 作为将镜像传输到Amazon ECR的工具。如果目标ECR中已经存在相同的层,则不会被再次传输。
+
+
+## 架构
+
+![ECR Plugin Architect](ecr-plugin-architect.png)
+
+EventBridge 规则用于触发Step Functions以定期执行任务(默认情况下,每天触发)。
+
+Step Functions将调用Lambda以从源获取镜像列表。
+
+Lambda将列出源ECR中的所有存储库,或者从 AWS System Manager Parameter Store 中获取已存储的选定镜像列表。
+
+传输任务将在Fargate中以最大10个并发运行。如果传输任务由于某种原因失败,它将自动重试3次。
+
+每个任务都使用`skopeo copy`将镜像传输到目标ECR中。
+
+传输完成后,状态(成功或失败)将记录到DynamoDB中以进行跟踪。
+
+## 部署
+
+有关此插件的部署的注意事项:
+
+- 部署本插件会自动在您的AWS账号里创建包括Lambda,DynamoDB表,ECS任务等资源
+- 部署预计用时3-5分钟
+- 一旦部署完成,复制任务就会马上开始
+
+请参考[部署指南](./ECR_DEPLOYMENT_CN.md)里的步骤进行部署。
+
+> 注意:如果不再需要数据传输任务,则可以从CloudFormation控制台中删除堆栈。
+
+## FAQ
+### 如何调试
+
+**问题**:部署完后似乎没有正常运行,该如何调试?
+
+**回答**:部署堆栈时,将要求您输入堆栈名称(默认为 DTHECRStack),大多数资源将使用该堆栈名称作为前缀进行创建。例如,Step Functions名称将采用`<堆栈名>-ECRReplicationSM`的格式。
+
+此插件将创建两个主要的CloudWatch日志组。
+
+- /aws/lambda/<堆栈名>-ListImagesFunction<随机后缀>
+
+这是获取镜像列表的日志组。如果未传输任何数据,则应首先检查Lambda运行日志中是否出了问题。这是第一步。
+
+- <堆栈名>-DTHECRContainerLogGroup<随机后缀>
+
+这是所有ECS容器的日志组,可以在此处找到详细的传输日志。
+
+如果您在日志组中找不到任何有帮助的内容,请在GitHub中提出问题。
+
+### 如何客制化
+
+**问题**:我想要更改此方案,需要做什么?
+
+**回答**:如果要更改解决方案,可以参考[定制](CUSTOM_BUILD.md)指南。
+
+> 注意:更多常见问题请参考[实施指南 - 常见问题解答](https://awslabs.github.io/data-transfer-hub/zh/faq/)。
\ No newline at end of file
diff --git a/docs/IAM-Policy.md b/docs/IAM-Policy.md
index 79473e7..0503e44 100644
--- a/docs/IAM-Policy.md
+++ b/docs/IAM-Policy.md
@@ -2,7 +2,7 @@
 
 # Set up Credential for Amazon S3
 
-- ## Step 1: Create IAM Policy
+## Step 1: Create IAM Policy
 
 Open AWS Management Console, Go to IAM > Policy, click **Create Policy**
 
@@ -10,7 +10,7 @@ Create a policy using below example IAM policy statement with minimum permission
 
 _Note_: If it's for S3 buckets in China regions, please make sure you also change to use `arn:aws-cn:s3:::` instead of `arn:aws:s3:::`
 
-- ### For Source Bucket
+### For Source Bucket
 
 ```
 {
@@ -33,7 +33,7 @@ _Note_: If it's for S3 buckets in China regions, please make sure you also chang
 ```
 
 
-- ### For Destination Bucket
+### For Destination Bucket
 
 ```
 {
@@ -63,7 +63,7 @@ _Note_: If it's for S3 buckets in China regions, please make sure you also chang
 
 > Data Transfer Hub native support the S3 source bucket enabled SSE-S3 and SSE-KMS, but if your source bucket enabled *SSE-CMK*, please replace the source bucket policy with the policy in the link [for S3 SSE-KMS](./S3-SSE-KMS-Policy.md).
 
-- ## Step 2: Create User
+## Step 2: Create User
 
 Open AWS Management Console, Go to IAM > User, click **Add User**, follow the wizard to create the user with credential.
diff --git a/docs/IAM-Policy_CN.md b/docs/IAM-Policy_CN.md
index bf5d096..393c0d9 100644
--- a/docs/IAM-Policy_CN.md
+++ b/docs/IAM-Policy_CN.md
@@ -2,7 +2,7 @@
 
 # 为 Amazon S3 设置凭证
 
-- ## Step 1: 创建 IAM Policy
+## Step 1: 创建 IAM Policy
 
 打开 AWS 管理控制台,转到 IAM > 策略,单击 **Create Policy**
 
@@ -12,7 +12,7 @@ Create a policy using below example IAM policy statement with minimum permission
 
 _Note_: 如果是针对中国地区的 S3 存储桶,请确保您更改为使用 `arn:aws-cn:s3:::` 而不是 `arn:aws:s3:::`
 
-- ### 对于源存储桶
+### 对于源存储桶
 
 ```
 {
@@ -35,7 +35,7 @@ _Note_: 如果是针对中国地区的 S3 存储桶,请确保您更改为使
 ```
 
 
-- ### 对于目标存储桶
+### 对于目标存储桶
 
 ```
 {
@@ -65,7 +65,7 @@ _Note_: 如果是针对中国地区的 S3 存储桶,请确保您更改为使
 
 > Data Transfer Hub 原生支持使用 SSE-S3 和 SSE-KMS 的数据源,但如果您的源存储桶启用了 *SSE-CMK*,请将源存储桶策略替换为链接 [for S3 SSE-CMK](./S3-SSE-KMS-Policy_CN.md)中的策略。
 
-- ## Step 2: 创建 User
+## Step 2: 创建 User
 
 打开 AWS 管理控制台,转至 IAM > 用户,单击 **添加用户**,按照向导创建具有凭证的用户。
diff --git a/docs/S3_DEPLOYMENT_CN.md b/docs/S3_DEPLOYMENT_CN.md
new file mode 100644
index 0000000..2b67754
--- /dev/null
+++ b/docs/S3_DEPLOYMENT_CN.md
@@ -0,0 +1,63 @@
+
+[English](./S3_DEPLOYMENT_EN.md)
+
+# 部署指南
+
+> 注意:如果您已经部署了 Data Transfer Hub 控制台,请直接参考[通过控制台创建 Amazon S3 传输任务](https://awslabs.github.io/data-transfer-hub/zh/user-guide/tutorial-s3/)。
+
+> 本教程是纯后端版本的部署指南。
+
+## 1. 准备VPC
+
+此解决方案可以部署在公有和私有子网中。建议使用公有子网。
+
+- 如果您想使用现有的 VPC,请确保 VPC 至少有 2 个子网,并且两个子网都必须具有公网访问权限(带有 Internet 网关的公有子网或带有 NAT 网关的私有子网)
+
+- 如果您想为此解决方案创建新的默认 VPC,请转到步骤2,并确保您在创建集群时选择了*为此集群创建一个新的 VPC*。
+
+## 2. 配置凭据
+
+您需要提供`AccessKeyID`和`SecretAccessKey`(即`AK/SK`)才能从另一个 AWS 账户或其他云存储服务读取或写入 S3 中的存储桶,凭证将存储在 AWS Secrets Manager 中。您**不需要**为此方案部署的当前账户里的存储桶创建凭证。
+
+打开AWS 管理控制台 > Secrets Manager。在 Secrets Manager 主页上,单击 **存储新的密钥**。对于密钥类型,请使用**其他类型的秘密**。对于键/值对,请将下面的 JSON 文本复制并粘贴到明文部分,并相应地将值更改为您的 AK/SK。
+
+```
+{
+  "access_key_id": "",
+  "secret_access_key": ""
+}
+```
+
+![密钥](./images/secret_cn.png)
+
+然后下一步指定密钥名称,最后一步点击创建。
+
+
+> 注意:如果该AK/SK是针对源桶,则需要具有桶的**读**权限;如果是针对目标桶,则需要具有桶的**读与写**权限。如果是Amazon S3,可以参考[配置凭据](./IAM-Policy_CN.md)。
+
+
+## 3. 启动AWS CloudFormation部署
+
+请按照以下步骤通过AWS CloudFormation部署此解决方案。
+
+1. 登录到AWS管理控制台,切换到将CloudFormation Stack部署到的区域。
+
+1. 单击以下按钮在该区域中启动CloudFormation堆栈。
+
+    - 部署到AWS中国北京和宁夏区
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template)
+
+    - 部署到AWS海外区
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template)
+
+
+
+1. 单击**下一步**。相应地为参数指定值。如果需要,请更改堆栈名称。
+
+1. 单击**下一步**。配置其他堆栈选项,例如标签(可选)。
+
+1. 单击**下一步**。查看并勾选确认,然后单击“创建堆栈”开始部署。
+
+部署预计用时3-5分钟。
\ No newline at end of file
diff --git a/docs/S3_DEPLOYMENT_EN.md b/docs/S3_DEPLOYMENT_EN.md
new file mode 100644
index 0000000..02fe692
--- /dev/null
+++ b/docs/S3_DEPLOYMENT_EN.md
@@ -0,0 +1,61 @@
+
+[中文](./S3_DEPLOYMENT_CN.md)
+
+# Deployment Guide
+
+> Note: If you have already deployed the Data Transfer Hub console, please refer directly to [Create Amazon S3 transfer task through Portal](https://awslabs.github.io/data-transfer-hub/en/user-guide/tutorial-s3/).
+
+> This tutorial is a deployment guide for the backend-only version.
+
+## 1. Prepare VPC
+
+This solution can be deployed in both public and private subnets. Using public subnets is recommended.
+
+- If you want to use an existing VPC, please make sure the VPC has at least 2 subnets, and both subnets must have public internet access (either public subnets with an internet gateway or private subnets with a NAT gateway).
+
+- If you want to create a new default VPC for this solution, please go to Step 2 and make sure you have *Create a new VPC for this cluster* selected when you create the cluster.
+
+## 2. Configure credentials
+
+You will need to provide `AccessKeyID` and `SecretAccessKey` (namely `AK/SK`) to read from or write to an S3 bucket in another AWS account or other cloud storage service, and the credential will be stored in AWS Secrets Manager. You DON'T need to create a credential for a bucket in the current account you are deploying the solution to.
+
+Go to AWS Management Console > Secrets Manager. From the Secrets Manager home page, click **Store a new secret**. For secret type, please use **Other type of secrets**. For key/value pairs, please copy and paste the JSON text below into the Plaintext section, and change the values to your AK/SK accordingly.
+
+```
+{
+  "access_key_id": "",
+  "secret_access_key": ""
+}
+```
+
+![Secret](./images/secret_en.png)
+
+Click Next to specify a secret name, and click Create in the last step.
+
+
+> Note that if the AK/SK is for the source bucket, **READ** access to the bucket is required; if it's for the destination bucket, **READ** and **WRITE** access to the bucket is required. For Amazon S3, you can refer to [Set up Credential](./IAM-Policy.md).
+
+
+## 3. Launch AWS CloudFormation Stack
+
+Please follow the steps below to deploy this solution via AWS CloudFormation.
+
+1. Sign in to AWS Management Console, and switch to the region to deploy the CloudFormation Stack to.
+
+1. Click the following button to launch the CloudFormation Stack in that region.
+
+    - For AWS China Regions
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template)
+
+    - For AWS Global regions
+
+      [![Launch Stack](./images/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template)
+
+1. Click **Next**. Specify values for the parameters accordingly. Change the stack name if required.
+
+1. Click **Next**. Configure additional stack options such as tags (optional).
+
+1. Click **Next**. Review and confirm the acknowledgement, then click **Create Stack** to start the deployment.
+
+The deployment will take approximately 3-5 minutes.
\ No newline at end of file
diff --git a/docs/S3_PLUGIN.md b/docs/S3_PLUGIN.md
new file mode 100644
index 0000000..cc4a5b6
--- /dev/null
+++ b/docs/S3_PLUGIN.md
@@ -0,0 +1,117 @@
+
+[中文](./S3_PLUGIN_CN.md)
+
+# Data Transfer Hub - S3 Plugin
+
+## Table of contents
+* [Introduction](#introduction)
+* [Architecture](#architecture)
+* [Deployment](#deployment)
+* [FAQ](#faq)
+  * [How to monitor](#how-to-monitor)
+  * [How to debug](#how-to-debug)
+  * [No CloudWatch logs](#no-cloudwatch-logs)
+  * [How to customize](#how-to-customize)
+
+
+## Introduction
+
+[Data Transfer Hub](https://github.com/awslabs/data-transfer-hub), a.k.a. Data Replication Hub, is a solution for transferring data from different sources into AWS. This project is for the S3 Transfer plugin. **You can deploy and run this plugin independently without the UI.**
+
+_This Data Transfer Hub - S3 Plugin is based on [amazon-s3-resumable-upload](https://github.com/aws-samples/amazon-s3-resumable-upload) contributed by [huangzbaws@](https://github.com/huangzbaws)._
+
+The following are the features supported by this plugin.
+
+- Transfer Amazon S3 objects between AWS China regions and Global regions
+- Transfer objects from Aliyun OSS / Tencent COS / Qiniu Kodo
+- Large file support
+- Support S3 Event trigger
+- Support Transfer with object metadata
+- Support incremental data transfer
+- Support transfer from S3 compatible storage
+- Auto retry and error handling
+
+## Architecture
+
+![S3 Plugin Architecture](./en-base/images/s3-arch-global.png)
+
+The Amazon S3 plugin runs the following workflows:
+
+1. A time-based EventBridge rule triggers an AWS Lambda function on an hourly basis.
+2. AWS Lambda uses the launch template to launch a data comparison job (JobFinder) in an [Amazon Elastic Compute Cloud (Amazon EC2)](https://aws.amazon.com/ec2/).
+3. The job lists all the objects in the source and destination
+buckets, makes comparisons among objects and determines which objects should be transferred.
+4. Amazon EC2 sends a message for each object that will be transferred to [Amazon Simple Queue Service (Amazon SQS)](https://aws.amazon.com/sqs/). Amazon S3 event messages can also be supported for more real-time data transfer; whenever an object is uploaded to the source bucket, the event message is sent to the same Amazon SQS queue.
+5. A JobWorker running in Amazon EC2 consumes the messages in SQS and transfers the object from the source bucket to the destination bucket. You can use an Auto Scaling Group to control the number of EC2 instances to transfer the data based on business need.
+6. A record with transfer status for each object is stored in Amazon DynamoDB.
+7. The Amazon EC2 instance will get (download) the object from the source bucket based on the Amazon SQS message.
+8. The Amazon EC2 instance will put (upload) the object to the destination bucket based on the Amazon SQS message.
+9. Upon the initial identification of a large file by the Worker node (with a default threshold of 1 GB), a Multipart Upload task is initialized. The corresponding UploadId is then conveyed to the Step Function, which triggers a scheduled recurring task. This Step Function undertakes periodic checks, every minute, to verify the successful transmission of the distributed shards associated with the UploadId across the entire cluster.
+10. If all shards have been successfully transmitted, the CompleteMultipartUpload API is invoked to finalize the consolidation of the shards. Alternatively, if any shards are found to be invalid, they are discarded.
+
+> Note: This solution uses the `t4g.micro` EC2 instance type to save cost. The pricing of this instance type is `$0.0084 per Hour` in the US West (Oregon) region at the point of writing. Check out [EC2 Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) to get the latest price. And the EC2 operating systems will by default have BBR (Bottleneck Bandwidth and RTT) enabled to improve network performance.
+
+
+## Deployment
+
+Things to know about the deployment of this plugin:
+
+- The deployment will automatically provision resources such as Lambda functions, a DynamoDB table, and an ECS task definition in your AWS account.
+- The deployment will take approximately 3-5 minutes.
+- Once the deployment is completed, the data transfer task will start right away.
+
+Please follow the steps in the [Deployment Guide](./S3_DEPLOYMENT_EN.md) to start the deployment.
+
+> Note: You can simply delete the stack from the CloudFormation console if the data transfer job is no longer required.
+
+
+## FAQ
+
+### How to monitor
+
+**Q**: After I deployed the solution, how can I monitor the progress?
+
+**A**: After deployment, there will be a CloudWatch dashboard created for you to monitor the progress; metrics such as running/waiting jobs, network, and transferred/failed objects will be logged in the dashboard. The screenshot below is an example:
+
+![Cloudwatch Dashboard Example](./en-base/images/dashboard.png)
+
+### How to debug
+
+**Q**: There seems to be something wrong, how do I debug?
+
+**A**:
+
+- **For Portal users**
+
+    Go to **Tasks** list page, and click the **Task ID**. You can see the dashboard and logs under the **Monitoring** section.
+
+    Data Transfer Hub has embedded Dashboard and log groups on the Portal, so you do not need to navigate to the AWS CloudWatch console to view the logs.
+
+- **For Plugin (Pure Backend) users**
+
+    When deploying the stack, you will be asked to enter the stack name (`DTHS3Stack` by default), and most resources will be created with the stack name as the name prefix. For example, the format of the queue name is `<StackName>-S3TransferQueue-<random suffix>`. This plugin will create two main log groups.
+
+    - If there is no data transfer, you need to check whether there is a problem in the Finder task log. The following is the log group for scheduling Finder tasks. For more information, refer to the [Troubleshooting](../troubleshooting) section.
+
+        `<StackName>-EC2FinderLogGroup`
+
+    - The following are the log groups of all EC2 instances, and you can find detailed transfer logs.
+
+        `<StackName>-CommonS3RepWorkerLogGroup`
+
+### No CloudWatch logs
+
+**Q**: After I deployed, I can't find any log streams in the two CloudWatch Log Groups
+
+**A**: This must be because the subnets you chose when you deployed this solution don't have public network access; therefore, the Fargate task failed to pull the images, and the EC2 instances can't download the CloudWatch Agent to send logs to CloudWatch. So please check your VPC setup (see [Deployment Guide](./S3_DEPLOYMENT_EN.md) Step 1). Once you fix the issue, you need to manually terminate the running EC2 instances launched by this solution, if any. After that, the Auto Scaling group will automatically start new ones.
+
+
+### How to customize
+
+**Q**: I want to make some custom changes, how do I do it?
+
+If you want to make custom changes to this plugin, you can follow the [custom build](CUSTOM_BUILD.md) guide.
+
+> Note: For more FAQs, please refer to [Implementation Guide - FAQ](https://awslabs.github.io/data-transfer-hub/en/faq/).
diff --git a/docs/S3_PLUGIN_CN.md b/docs/S3_PLUGIN_CN.md
new file mode 100644
index 0000000..e96c401
--- /dev/null
+++ b/docs/S3_PLUGIN_CN.md
@@ -0,0 +1,111 @@
+
+[English](./S3_PLUGIN.md)
+
+# Data Transfer Hub - S3插件
+
+## 目录
+* [介绍](#介绍)
+* [架构](#架构)
+* [部署](#部署)
+* [FAQ](#faq)
+  * [如何监控](#如何监控)
+  * [如何调试](#如何调试)
+  * [在CloudWatch里没有日志](#在CloudWatch里没有日志)
+  * [如何客制化](#如何客制化)
+
+## 介绍
+
+[Data Transfer Hub](https://github.com/awslabs/data-transfer-hub),前称是Data Replication Hub,是一个用于从不同的源传输数据到AWS的解决方案。本项目是该方案的其中一款插件(S3插件)。你可以独立部署和运行此插件而无需使用UI。
+
+_本项目(Data Replication Hub - S3 Plugin)是基于[huangzbaws@](https://github.com/huangzbaws) 的 [amazon-s3-resumable-upload](https://github.com/aws-samples/amazon-s3-resumable-upload) 基础上开发的。_
+
+以下是本插件提供的功能列表:
+
+- 在 Amazon S3 中国区和海外区之间传输对象
+- 从阿里云OSS /腾讯COS /七牛Kodo 传输对象
+- 大文件支持
+- 支持S3事件触发
+- 支持对象元数据传输
+- 支持增量数据传输
+- 支持从 S3 兼容对象存储传输数据
+- 自动重试和错误处理
+
+## 架构
+
+![S3 Plugin Architect](./en-base/images/s3-arch-global.png)
+
+在AWS Fargate中运行的*Finder*任务列出了源存储桶和目标存储桶中的所有对象,并确定应复制哪些对象,将在SQS中为每个要复制的对象创建一条消息。*基于时间的CloudWatch规则*将触发ECS任务每小时运行一次。
+
+此外,本插件也支持S3事件通知,以(实时)触发复制,前提是需要将此插件部署在与源存储桶相同的帐户(和区域)中。事件消息也将发送到相同的SQS队列。
+
+在EC2中运行的*Worker*任务会消费SQS中的消息,并将对象从源存储桶传输到目标存储桶。你可以根据业务需要,在Auto Scaling Group里调整使用多少台EC2实例进行数据传输。
+
+如果某个对象或对象的一部分传输失败,则*JobWorker*将在队列中释放该消息,并且该消息在队列中可见后将再次传输该对象(默认可见性超时设置为15分钟,大文件会自动延长)。经过几次尝试,如果传输依然失败,该消息就会被移到Dead Letter Queue并且触发Alarm提醒。
+
+该插件支持传输大文件。它将大文件分成多个小的部分并利用Amazon S3的[multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) 功能进行分段传输,支持断点续传。
+
+> 注意: 此解决方案使用`t4g.micro`实例类型以节省成本。在撰写本文时,此实例类型在US West (Oregon)区的价格为`每小时$0.0084`。请查看[EC2定价](https://aws.amazon.com/ec2/pricing/on-demand/)以获取最新价格。并且Amazon EC2操作系统将默认启用BBR(Bottleneck Bandwidth and RTT)以提高网络性能。
+
+## 部署
+
+有关此插件的部署的注意事项:
+
+- 部署本插件会自动在您的AWS账号里创建包括Lambda,DynamoDB表,ECS任务等资源
+- 部署预计用时3-5分钟
+- 一旦部署完成,复制任务就会马上开始
+
+请参考[部署指南](./S3_DEPLOYMENT_CN.md)里的步骤进行部署。
+
+> 注意:如果不再需要数据传输任务,则可以从CloudFormation控制台中删除堆栈。
+
+
+## FAQ
+
+### 如何监控
+
+**问题**:部署解决方案后,如何监视进度?
+
+**回答**:部署后,将创建一个CloudWatch仪表板供您监视进度,运行/等待作业、网络、已传输/失败的对象等指标将记录在仪表板中。下图是一个示例:
+
+![Cloudwatch Dashboard Example](./en-base/images/dashboard.png)
+
+### 如何调试
+
+**问题**:部署完后似乎没有正常运行,该如何调试?
+
+**回答**:
+
+- **对于控制台用户**
+
+    转到**任务**列表页面,然后单击**任务编号**。您可以在 **日志监控** 部分下看到仪表板和日志。
+
+    Data Transfer Hub 已将 Dashboard 和日志组集成到 Portal 中,您无需跳转到 AWS CloudWatch 控制台即可查看日志。
+
+- **对于 Plugin(纯后端版本)用户**
+
+    部署堆栈时,会要求您输入堆栈名称(默认为DTHS3Stack),大多数资源都会以堆栈名称作为前缀创建。例如,队列名称的格式为`<堆栈名>-S3TransferQueue-<随机后缀>`。此插件将创建两个主要日志组。
+
+    - 如果没有数据传输,您需要检查Finder任务日志中是否有问题。以下是调度Finder任务的日志组。您可以在 [错误消息列表](#error-code-list) 中找到更多信息。
+
+        `<堆栈名>-EC2FinderLogGroup`
+
+    - 以下是所有EC2实例的日志组,您可以找到详细的传输日志。
+
+        `<堆栈名>-CommonS3RepWorkerLogGroup`
+
+### 在CloudWatch里没有日志
+
+**问题**:我部署完该插件,但我在CloudWatch日志组里没有找到任何日志
+
+**回答**:这一定是因为您在部署此解决方案时选择的子网没有公共网络访问权限,因此 Fargate任务无法拉取映像,而EC2实例则无法下载 CloudWatch Agent以将日志发送到 CloudWatch。请检查您的 VPC 设置(请参阅[部署指南](./S3_DEPLOYMENT_CN.md) 步骤 1)。修复问题后,您需要手动终止正在运行的此方案的EC2 实例(如果有的话)。之后,Auto Scaling Group将自动启动新实例。
+
+
+### 如何客制化
+
+**问题**:我想要更改此方案,需要做什么?
+
+**回答**:如果要更改解决方案,可以参考[定制](CUSTOM_BUILD.md)指南。
+
+> 注意:更多常见问题请参考[实施指南 - 常见问题解答](https://awslabs.github.io/data-transfer-hub/zh/faq/)。
\ No newline at end of file
diff --git a/docs/USING_PREFIX_LIST.md b/docs/USING_PREFIX_LIST.md
new file mode 100644
index 0000000..41b9237
--- /dev/null
+++ b/docs/USING_PREFIX_LIST.md
@@ -0,0 +1,23 @@
+[中文](./USING_PREFIX_LIST_CN.md)
+
+# Using Prefix List File to Filter Data Transmission Job
+
+## Step 1: Create a Prefix List File
+
+Please write the list of prefixes into a plain text file, with one prefix per line.
+
+For example:
+![Prefix List File](images/prefix_list_file.png)
+
+## Step 2: Upload the Prefix List File to the source data bucket
+
+You can put the prefix list file anywhere in your source bucket.
+> Note: Please remember to write its actual path when filling in the location of the Prefix List File in Step 3.
+
+![prefix_list_file_in_s3](images/prefix_list_file_in_s3.png)
+
+## Step 3: Configure the CloudFormation stack template
+
+Write the path of the Prefix List File into the input box.
+
+![cloudformation](images/cloudformation_prefix_list.png)
\ No newline at end of file
diff --git a/docs/USING_PREFIX_LIST_CN.md b/docs/USING_PREFIX_LIST_CN.md
new file mode 100644
index 0000000..3d370a4
--- /dev/null
+++ b/docs/USING_PREFIX_LIST_CN.md
@@ -0,0 +1,23 @@
+[English](./USING_PREFIX_LIST.md)
+
+# 使用前缀列表完成多个指定前缀中数据的传输
+
+## Step 1: 创建前缀列表
+
+请将前缀列表写入纯文本格式文件,每行一个前缀。
+
+示例如下:
+![Prefix List File](images/prefix_list_file.png)
+
+## Step 2: 上传前缀列表文件到源数据桶
+
+您可以将前缀列表文件放在源存储桶中的任何位置。
+> 注意: 请记住在步骤3填写Prefix List File的位置时填入它的实际路径。
+
+![prefix_list_file_in_s3](images/prefix_list_file_in_s3.png)
+
+## Step 3: 配置 CloudFormation 的堆栈模板
+
+将Prefix List File的路径写入堆栈模板的指定参数中。
+
+![cloudformation](images/cloudformation_prefix_list.png)
\ No newline at end of file
diff --git a/docs/en-base/architecture-overview/architecture.md b/docs/en-base/architecture-overview/architecture.md
index 7e1965a..c2176b8 100644
--- a/docs/en-base/architecture-overview/architecture.md
+++ b/docs/en-base/architecture-overview/architecture.md
@@ -10,9 +10,9 @@ This solution deploys the Amazon CloudFormation template in your AWS Cloud accou
 3. Users are authenticated by either [Amazon Cognito][cognito] User Pool (in AWS Regions) or by an OpenID connect provider (in AWS China Regions) such as [Authing](https://www.authing.cn/), [Auth0](https://auth0.com/), etc.
 4. AWS AppSync runs [AWS Lambda][lambda] to call backend APIs.
diff --git a/docs/USING_PREFIX_LIST_CN.md b/docs/USING_PREFIX_LIST_CN.md new file mode 100644 index 0000000..3d370a4 --- /dev/null +++ b/docs/USING_PREFIX_LIST_CN.md @@ -0,0 +1,23 @@ +[English](./USING_PREFIX_LIST.md) + +# 使用前缀列表完成多个指定前缀中数据的传输 + +## Step 1: 创建前缀列表 + +请将前缀列表写入纯文本格式文件,每行一个前缀。 + +示例如下: +![Prefix List File](images/prefix_list_file.png) + +## Step 2: 上传前缀列表文件到源数据桶 + +您可以将前缀列表文件放在源存储桶中的任何位置。 +> 注意: 请记住在步骤3填写Prefix List File的位置时填入它的实际路径。 + +![prefix_list_file_in_s3](images/prefix_list_file_in_s3.png) + +## Step 3: 配置 CloudFormation 的堆栈模板 + +将Prefix List File的路径写入堆栈模板的指定参数中。 + +![cloudformation](images/cloudformation_prefix_list.png) \ No newline at end of file diff --git a/docs/en-base/architecture-overview/architecture.md index 7e1965a..c2176b8 100644 --- a/docs/en-base/architecture-overview/architecture.md +++ b/docs/en-base/architecture-overview/architecture.md @@ -10,9 +10,9 @@ This solution deploys the Amazon CloudFormation template in your AWS Cloud accou 3. Users are authenticated by either [Amazon Cognito][cognito] User Pool (in AWS Regions) or by an OpenID connect provider (in AWS China Regions) such as [Authing](https://www.authing.cn/), [Auth0](https://auth0.com/), etc. 4. AWS AppSync runs [AWS Lambda][lambda] to call backend APIs. 5. Lambda starts an [AWS Step Functions][stepfunction] workflow that uses [AWS CloudFormation][cloudformation] to start or stop/delete the Amazon ECR or Amazon S3 plugin template. -6. The plugin templates are hosted in a centralized Amazon S3 bucket manged by AWS. +6. The plugin templates are hosted in a centralized Amazon S3 bucket managed by AWS. 7. The solution also provisions an [Amazon ECS][ecs] cluster that runs the container images used by the plugin template, and the container images are hosted in [Amazon ECR][ecr]. -8. The data transfer task information is stored in in [Amazon DynamoDB][dynamodb]. +8. The data transfer task information is stored in [Amazon DynamoDB][dynamodb]. After deploying the solution, you can use [AWS WAF][waf] to protect CloudFront or AppSync. @@ -22,7 +22,7 @@ After deploying the solution, you can use [AWS WAF][waf] to protect CloudFront o The web console is a centralized place to create and manage all data transfer jobs. Each data type (for example, Amazon S3 or Amazon ECR) is a plugin for Data Transfer Hub, and is packaged as an AWS CloudFormation template hosted in an S3 bucket that AWS owns. When you create a transfer task, an AWS Lambda function initiates the Amazon CloudFormation template, and the state of each task is stored and displayed in the DynamoDB tables. -As of April 2023, the solution supports two data transfer plugins: an Amazon S3 plugin and an Amazon ECR plugin. +As of this revision, the solution supports two data transfer plugins: an Amazon S3 plugin and an Amazon ECR plugin. ## Amazon S3 plugin @@ -36,14 +36,15 @@ The Amazon S3 plugin runs the following workflows: 3. The job lists all the objects in the source and destination buckets, makes comparisons among objects and determines which objects should be transferred. 4. Amazon EC2 sends a message for each object that will be transferred to [Amazon Simple Queue Service (Amazon SQS)][sqs]. Amazon S3 event messages can also be supported for more real-time data transfer; whenever an object is uploaded to the source bucket, the event message is sent to the same Amazon SQS queue. -5. A JobWorker running in Amazon EC2 consumes the messages in SQS and transfers the object from the source bucket to the destination bucket. You can use an Auto Scaling Group to control the number of EC2 instances to transfer the data based on business need. +5. A JobWorker running in Amazon EC2 consumes the messages in SQS and transfers the object from the source bucket to the destination bucket. You can use an Auto Scaling group to control the number of EC2 instances to transfer the data based on business need. 6. A record with transfer status for each object is stored in Amazon DynamoDB. 7. The Amazon EC2 instance will get (download) the object from the source bucket based on the Amazon SQS message. 8. The Amazon EC2 instance will put (upload) the object to the destination bucket based on the Amazon SQS message. - + +9. When the JobWorker node identifies a large file (default threshold: 1 GB) for the first time, it initiates a multipart upload task in Amazon EC2. The corresponding UploadId is then passed to AWS Step Functions, which starts a scheduled recurring task. Every minute, AWS Step Functions verifies whether the distributed shards associated with the UploadId have been transmitted successfully across the entire cluster. +10. If all shards have been transmitted successfully, Amazon EC2 invokes the CompleteMultipartUpload API in Amazon S3 to finalize the consolidation of the shards. Otherwise, any invalid shards are discarded.
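
To make steps 9 and 10 concrete, here is a minimal sketch of such a completion check using the standard Amazon S3 multipart upload API via boto3; the bucket, key, UploadId, and expected part count are placeholders, and the solution's actual implementation is more involved:

```python
import boto3

s3 = boto3.client("s3")

# Placeholders for illustration; the solution tracks these values per transfer task.
bucket = "dth-destination-bucket"
key = "path/to/large-file.bin"
upload_id = "example-upload-id"
expected_part_count = 10  # assumed, derived from file size / part size

# List the parts (shards) that have been uploaded so far.
parts = s3.list_parts(Bucket=bucket, Key=key, UploadId=upload_id).get("Parts", [])

if len(parts) == expected_part_count:
    # All shards arrived: finalize the consolidation.
    s3.complete_multipart_upload(
        Bucket=bucket,
        Key=key,
        UploadId=upload_id,
        MultipartUpload={
            "Parts": [{"ETag": p["ETag"], "PartNumber": p["PartNumber"]} for p in parts]
        },
    )
else:
    # Incomplete or invalid shards: abort so they are discarded.
    s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
```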
!!! note "Note" - If an object (or part of an object) failed to transfer, the JobWorker releases the message in the queue, and the object is transferred again after the message is visible in the queue (default visibility timeout is set to 15 minutes). If the transfer fails again, the message is sent to the dead letter queue and a notification alarm is sent. + If an object (or part of an object) fails to transfer, the JobWorker releases the message in the queue, and the object is transferred again after the message becomes visible in the queue (the default visibility timeout is set to 15 minutes). If the transfer fails five times, the message is sent to the dead letter queue and a notification alarm is initiated. ## Amazon ECR plugin diff --git a/docs/en-base/faq.md index bfeeccb..590bdfd 100644 --- a/docs/en-base/faq.md +++ b/docs/en-base/faq.md @@ -4,7 +4,7 @@ The following are common questions you might have when deploying and using the s **1. In which AWS Regions can this solution be deployed?**

-For the list of supported regions, refer to [supported regions](../plan-deployment/regions). +For the list of supported Regions, refer to [supported Regions](../plan-deployment/regions). **2. When creating a transfer task, should I deploy it on the data source side or the destination side?**
@@ -14,8 +14,8 @@ If you do not have a domain name registered by ICP in AWS China Regions, we reco If you need to deploy in AWS China Regions but do not have a domain name, you can directly deploy the back-end version: -- Amazon S3 Plugin: [https://github.com/awslabs/amazon-s3-data-replication-hub-plugin](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin) -- Amazon ECR Plugin: [https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin) +- Amazon S3 Plugin: [https://github.com/awslabs/data-transfer-hub/blob/main/docs/S3_PLUGIN.md](https://github.com/awslabs/data-transfer-hub/blob/main/docs/S3_PLUGIN.md) +- Amazon ECR Plugin: [https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md](https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md) **3. Do I need to deploy the solution on the data source and destination side separately?**
@@ -193,7 +193,7 @@ You need to update Secrets in Secrets Manager first, and then go to the EC2 cons - If there is no data transfer, you need to check whether there is a problem in the Finder task log. The following is the log group for scheduling Finder tasks. For more information, refer to the [Troubleshooting](../troubleshooting) section. - `-EC2FinderLogGroup` + `-CommonS3RepWorkerLogGroup` - The following are the log groups of all EC2 instances, and you can find detailed transfer logs. @@ -201,7 +201,7 @@ You need to update Secrets in Secrets Manager first, and then go to the EC2 cons **4. How do I make a customized build?**

-If you want to make customized changes to this plugin, refer to [Custom Build](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin/blob/main/docs/CUSTOM_BUILD.md). +If you want to make customized changes to this plugin, refer to [Custom Build](https://github.com/awslabs/data-transfer-hub/blob/main/CUSTOM_BUILD.md). **5. After the deployment is complete, why can't I find any log streams in the two CloudWatch log groups?**
diff --git a/docs/en-base/images/dashboard.png b/docs/en-base/images/dashboard.png new file mode 100644 index 0000000..4699f47 Binary files /dev/null and b/docs/en-base/images/dashboard.png differ diff --git a/docs/en-base/images/s3-arch-global.png b/docs/en-base/images/s3-arch-global.png index 855b408..4891441 100644 Binary files a/docs/en-base/images/s3-arch-global.png and b/docs/en-base/images/s3-arch-global.png differ diff --git a/docs/en-base/solution-overview/index.md b/docs/en-base/index.md similarity index 82% rename from docs/en-base/solution-overview/index.md rename to docs/en-base/index.md index dd8caf1..00d953d 100644 --- a/docs/en-base/solution-overview/index.md +++ b/docs/en-base/index.md @@ -6,11 +6,11 @@ Use this navigation table to quickly find answers to these questions: | If you want to … | Read… | |----------|--------| -| Know the cost for running this solution | [Cost](../plan-deployment/cost) | -| Understand the security considerations for this solution | [Security](../plan-deployment/security) | -| Know how to plan for quotas for this solution | [Quotas](../plan-deployment/quotas) | -| Know which AWS Regions are supported for this solution | [Supported AWS Regions](../plan-deployment/regions) | -| View or download the AWS CloudFormation template included in this solution to automatically deploy the infrastructure resources (the “stack”) for this solution | [AWS CloudFormation templates](../deployment/template) | +| Know the cost for running this solution | [Cost](./plan-deployment/cost) | +| Understand the security considerations for this solution | [Security](./plan-deployment/security) | +| Know how to plan for quotas for this solution | [Quotas](./plan-deployment/quotas) | +| Know which AWS Regions are supported for this solution | [Supported AWS Regions](./plan-deployment/regions) | +| View or download the AWS CloudFormation template included in this solution to automatically deploy the infrastructure resources (the “stack”) for this solution | [AWS CloudFormation templates](./deployment/template) | This guide is intended for IT architects, developers, DevOps, data analysts, and marketing technology professionals who have practical experience architecting in the AWS Cloud. 
diff --git a/docs/en-base/plan-deployment/cost.md b/docs/en-base/plan-deployment/cost.md index f9f85f5..fbcb524 100644 --- a/docs/en-base/plan-deployment/cost.md +++ b/docs/en-base/plan-deployment/cost.md @@ -16,7 +16,7 @@ Transfer 1 TB of S3 files from AWS Oregon Region (us-west-2) to AWS Beijing Regi - Average speed per EC2 instance: ~1GB/min - Total EC2 instance hours: ~17 hours -As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of this revision, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| @@ -36,7 +36,7 @@ Transfer 1 TB of S3 files from AWS Oregon region (us-west-2) to China Beijing Re - Average speed per EC2 instance: ~6MB/min (~10 files per sec) - Total EC2 instance hours: ~3000 hours -As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of this revision, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| @@ -56,7 +56,7 @@ For an Amazon ECR transfer task, the cost can vary based on network speed and to Transfer 27 Amazon ECR images (~3 GB in total size) from AWS Ireland Region (eu-west-1) to AWS Beijing Region (cn-north-1). The total runtime is about 6 minutes. -As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of this revision, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| diff --git a/docs/en-base/revisions.md b/docs/en-base/revisions.md index 9a9be3b..d761e0a 100644 --- a/docs/en-base/revisions.md +++ b/docs/en-base/revisions.md @@ -5,4 +5,5 @@ | December 2021 | Released version 2.1
1. Support custom prefix list to filter transfer tasks
2. Support configuration of single-run file transfer tasks
3. Support configuration of tasks through custom CRON Expression timetable
4. Support manual enabling or disabling of data comparison function | | July 2022 | Released version 2.2
1. Support transfer data through Direct Connect| | March 2023 | Released version 2.3
1. Support embedded dashboard and logs
2. Support S3 Access Key Rotation
3. Enhance One Time Transfer Task monitoring| -| April 2023 | Released version 2.4
1. Support payer request S3 object transfer| \ No newline at end of file +| April 2023 | Released version 2.4
1. Support payer request S3 object transfer| +| September 2023 | Released version 2.5
1. Added support for transferring ECR assets without tags
2. Optimized the stop task operation and added a new filter condition to view all historical tasks

3. Enhanced transfer performance by utilizing cluster capabilities through parallel multipart upload for large file transfers
4. Added automatic restart functionality for the Worker CLI

5. Enabled IMDSv2 by default for Auto Scaling Groups | \ No newline at end of file diff --git a/docs/en-base/update.md index 22c7049..aff03a3 100644 --- a/docs/en-base/update.md +++ b/docs/en-base/update.md @@ -36,19 +36,8 @@ You can view the status of the stack in the AWS CloudFormation console in the ** ## Step 2. (Optional) Update the OIDC configuration -If you have deployed the solution in China Region with OIDC, refer to the [deployment](../deployment/#prerequisite-1-create-an-oidc-user-pool) section to update the authorization and authentication configuration in OIDC. +If you have deployed the solution in China Region with OIDC, refer to the [deployment](deployment/deployment.md#prerequisite-1-create-an-oidc-user-pool) section to update the authorization and authentication configuration in OIDC. -## Step 3. Create an invalidation on CloudFront - -CloudFront has cached an old version of Data Transfer Hub console at its pop locations. We need to create an invalidation on the CloudFront console to -force the deletion of cache. - -1. Go to the [AWS CloudFront console](https://console.aws.amazon.com/cloudfront/){target='_blank'}. - -2. Choose the Distribution of Data Transfer Hub. The Description is like `SolutionName - Web Console Distribution (RegionName)`. - -3. On the **Invalidation** page, click **Create invalidation**, and create an invalidation with `/*`. - -## Step 4. Refresh the web console +## Step 3. Refresh the web console Now you have completed all the upgrade steps. Please click the **refresh** button in your browser. diff --git a/docs/en-base/user-guide/tutorial-cli-launch.md index 672c497..1cbefa5 100644 --- a/docs/en-base/user-guide/tutorial-cli-launch.md +++ b/docs/en-base/user-guide/tutorial-cli-launch.md @@ -3,14 +3,8 @@ You can use the [AWS CLI][aws-cli] to create an Amazon S3 transfer task. Note th 1. Create an Amazon VPC with two public subnets or two private subnets with [NAT gateway][nat]. 2. Replace `` as shown below. - - - Global Region: - ``` - https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template - ``` - - China Region: ``` - https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template + https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template ``` 3. Go to your terminal and enter the following command. For the parameter details, refer to the Parameters table.
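
For step 3 above, the stack can be launched from any AWS SDK as well as the AWS CLI. The following boto3 sketch is illustrative only: the parameter list is abbreviated (see the Parameters table for the full set) and all values are placeholders.

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-west-2")

cfn.create_stack(
    StackName="DTHS3Stack",
    TemplateURL=(
        "https://solutions-reference.s3.amazonaws.com/"
        "data-transfer-hub/latest/DataTransferS3Stack.template"
    ),
    Parameters=[
        # Abbreviated; the template defines many more parameters.
        {"ParameterKey": "srcBucket", "ParameterValue": "my-source-bucket"},
        {"ParameterKey": "destBucket", "ParameterValue": "my-destination-bucket"},
        {"ParameterKey": "alarmEmail", "ParameterValue": "user@example.com"},
    ],
    Capabilities=["CAPABILITY_NAMED_IAM"],
)
```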
diff --git a/docs/en-base/user-guide/tutorial-ecr.md index ddaa98b..a44258d 100644 --- a/docs/en-base/user-guide/tutorial-ecr.md +++ b/docs/en-base/user-guide/tutorial-ecr.md @@ -1,6 +1,6 @@ You can use the web console to create an Amazon ECR transfer task. For more information about how to launch the web console, see [deployment](../../deployment/deployment-overview). -1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. +1. From the **Create Transfer Task** page, select **Start a New Task**, and then select **Next**. 1. From the **Engine options** page, under engine, select **Amazon ECR**, and then choose **Next Step**. You can also copy images from Docker Hub, GCR.io, Quay.io, and so on by choosing **Public Container Registry**. 
diff --git a/docs/en-base/user-guide/tutorial-oss.md index 93b5d4a..73ae526 100644 --- a/docs/en-base/user-guide/tutorial-oss.md +++ b/docs/en-base/user-guide/tutorial-oss.md @@ -24,7 +24,7 @@ You have already deployed the Data Transfer Hub in **Oregon (us-west-2)** region 1. Click **Store**. ## Step 2: Create an OSS transfer task -1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. +1. From the **Create Transfer Task** page, select **Start a New Task**, and then select **Next**. 2. From the **Engine options** page, under engine, select **Amazon S3**, and then choose **Next Step**. diff --git a/docs/en-base/user-guide/tutorial-s3.md index 2b90200..46b4da5 100644 --- a/docs/en-base/user-guide/tutorial-s3.md +++ b/docs/en-base/user-guide/tutorial-s3.md @@ -3,7 +3,7 @@ You can use the web console to create an Amazon S3 transfer task. For more infor !!! Note "Note" Data Transfer Hub also supports using AWS CLI to create an Amazon S3 transfer task. For details, refer to this [tutorial](./tutorial-cli-launch.md). -1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. +1. From the **Create Transfer Task** page, select **Start a New Task**, and then select **Next**. 2. From the **Engine options** page, under engine, select **Amazon S3**, and then choose **Next Step**. @@ -15,7 +15,7 @@ You can use the web console to create an Amazon S3 transfer task. For more infor - If you need to achieve real-time incremental data synchronization, please configure whether to enable S3 event notification. Note that this option can only be configured when the program and your data source are deployed in the same area of the same account. - If you do not enable S3 event notification, the program will periodically synchronize incremental data according to the scheduling frequency you configure in the future. - If the source bucket is not in the same account where Data Transfer Hub was deployed, select **No**, then specify the credentials for the source bucket. - - If you choose to synchronize objects with multiple prefixes, please transfer the prefix list file separated by rows to the root directory of the data source bucket, and then fill in the name of the file. For details, please refer to [Multi-Prefix List Configuration Tutorial](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin/blob/r2_1/docs/USING_PREFIX_LIST_EN.md)。 + - If you choose to synchronize objects with multiple prefixes, please transfer the prefix list file separated by rows to the root directory of the data source bucket, and then fill in the name of the file. For details, please refer to the [Multi-Prefix List Configuration Tutorial](https://github.com/awslabs/data-transfer-hub/blob/main/docs/USING_PREFIX_LIST.md). 5. To create credential information, select [Secrets Manager](https://console.aws.amazon.com/secretsmanager/home) to jump to the AWS Secrets Manager console in the current region. - From the left menu, select **Secrets**, then choose **Store a new secret** and select the **other type of secrets** key type.
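
For step 5, the secret can also be created programmatically. Here is a minimal sketch: the secret name is your own choice, the credential values are placeholders, and the plugin expects a plaintext JSON secret with exactly these two fields.

```python
import json

import boto3

sm = boto3.client("secretsmanager", region_name="us-west-2")

sm.create_secret(
    Name="dth-source-credentials",  # your choice; referenced when creating the task
    SecretString=json.dumps(
        {
            "access_key_id": "AKIAIOSFODNN7EXAMPLE",
            "secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
        }
    ),
)
```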
diff --git a/docs/images/cloudformation_prefix_list.png b/docs/images/cloudformation_prefix_list.png new file mode 100644 index 0000000..0510073 Binary files /dev/null and b/docs/images/cloudformation_prefix_list.png differ diff --git a/docs/images/cluster_cn.png b/docs/images/cluster_cn.png new file mode 100644 index 0000000..bdcd59a Binary files /dev/null and b/docs/images/cluster_cn.png differ diff --git a/docs/images/cluster_en.png b/docs/images/cluster_en.png new file mode 100644 index 0000000..fdb1b13 Binary files /dev/null and b/docs/images/cluster_en.png differ diff --git a/docs/images/launch-stack.svg b/docs/images/launch-stack.svg new file mode 100644 index 0000000..eb7d015 --- /dev/null +++ b/docs/images/launch-stack.svg @@ -0,0 +1 @@ +Launch Stack \ No newline at end of file diff --git a/docs/images/prefix_list_file.png b/docs/images/prefix_list_file.png new file mode 100644 index 0000000..c6d6fda Binary files /dev/null and b/docs/images/prefix_list_file.png differ diff --git a/docs/images/prefix_list_file_in_s3.png b/docs/images/prefix_list_file_in_s3.png new file mode 100644 index 0000000..1894294 Binary files /dev/null and b/docs/images/prefix_list_file_in_s3.png differ diff --git a/docs/images/secret_cn.png b/docs/images/secret_cn.png new file mode 100644 index 0000000..d1854c1 Binary files /dev/null and b/docs/images/secret_cn.png differ diff --git a/docs/images/secret_en.png b/docs/images/secret_en.png new file mode 100644 index 0000000..070f766 Binary files /dev/null and b/docs/images/secret_en.png differ diff --git a/docs/mkdocs.en.yml b/docs/mkdocs.en.yml index ee40b6c..97d5db3 100644 --- a/docs/mkdocs.en.yml +++ b/docs/mkdocs.en.yml @@ -9,7 +9,7 @@ theme: nav: - Implementation Guide: - Solution Overview: - - Overview: solution-overview/index.md + - Overview: index.md - Features and benefits: solution-overview/features-and-benefits.md - Use cases: solution-overview/use-cases.md - Architecture Overview: diff --git a/docs/mkdocs.zh.yml b/docs/mkdocs.zh.yml index 03a760d..104123d 100644 --- a/docs/mkdocs.zh.yml +++ b/docs/mkdocs.zh.yml @@ -10,7 +10,7 @@ theme: nav: - 实施指南: - 方案概述: - - 概述: solution-overview/index.md + - 概述: index.md - 功能和优势: solution-overview/features-and-benefits.md - 客户用例: solution-overview/use-cases.md - 架构概览: diff --git a/docs/tutorial-directconnect-isolated.md b/docs/tutorial-directconnect-isolated.md index db8a7e3..5812fea 100644 --- a/docs/tutorial-directconnect-isolated.md +++ b/docs/tutorial-directconnect-isolated.md @@ -78,11 +78,11 @@ We recommend using the **DTH S3-plugin** to create the transfer task, instead of **For AWS China Regions** -[![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) +[![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template) **For AWS Global Regions** -[![Launch Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) +[![Launch 
Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub/latest/DataTransferS3Stack.template) 1. For **Source Type**, choose **Amazon_S3**. diff --git a/docs/zh-base/architecture-overview/architecture.md index 5b4d284..94b1281 100644 --- a/docs/zh-base/architecture-overview/architecture.md +++ b/docs/zh-base/architecture-overview/architecture.md @@ -22,7 +22,7 @@ 网页控制台用于集中创建和管理所有数据传输任务。每种数据类型(例如,Amazon S3或Amazon ECR)都是插件,并打包为AWS CloudFormation模板,托管在Amazon S3存储桶中。当您创建传输任务时,AWS Lambda函数会启动AWS CloudFormation模板,并且每个任务的状态都会存储并显示在Amazon DynamoDB表中。 -截至2023年4月,该解决方案支持两个数据传输插件:Amazon S3插件和Amazon ECR插件。 +截至本次发布版本,该解决方案支持两个数据传输插件:Amazon S3插件和Amazon ECR插件。 ## Amazon S3插件 ![s3-architecture](../images/s3-arch-global.png) @@ -32,16 +32,18 @@ 1. EventBridge规则定时触发AWS Lambda 函数,默认每小时运行一次。 2. AWS Lambda 将使用启动模板在 Amazon EC2 中启动数据比较作业 (JobFinder)。 -2. 该任务列出源和目标存储桶中的所有对象,进行比较并确定传输对象。 -3. Amazon EC2 为每一个需要传输的对象发送一条消息到 Amazon SQS 队列中。同时该方案还支持Amazon S3事件消息,以实现更实时的数据传输;每当有对象上传到源存储桶时,事件消息就会被发送到同一个 Amazon SQS 队列。 -4. 在Amazon EC2中运行的JobWorker使用 Amazon SQS 中的消息,并将对象从源存储桶传输到目标存储桶。该方案将使用Auto Scaling Group来控制 Amazon EC2 实例的数量,并根据业务需要传输数据。 -5. 每个对象的传输状态记录存储在Amazon DynamoDB中。 -6. Amazon EC2实例将根据SQS消息从源存储桶中获取(下载)对象。 -7. Amazon EC2实例将根据SQS消息将对象放入(上传)到目标存储桶。 +3. 该任务列出源和目标存储桶中的所有对象,进行比较并确定传输对象。 +4. Amazon EC2 为每一个需要传输的对象发送一条消息到 Amazon SQS 队列中。同时该方案还支持Amazon S3事件消息,以实现更实时的数据传输;每当有对象上传到源存储桶时,事件消息就会被发送到同一个 Amazon SQS 队列。 +5. 在Amazon EC2中运行的JobWorker使用 Amazon SQS 中的消息,并将对象从源存储桶传输到目标存储桶。该方案将使用Auto Scaling Group来控制 Amazon EC2 实例的数量,并根据业务需要传输数据。 +6. 每个对象的传输状态记录存储在Amazon DynamoDB中。 +7. Amazon EC2实例将根据SQS消息从源存储桶中获取(下载)对象。 +8. Amazon EC2实例将根据SQS消息将对象放入(上传)到目标存储桶。 +9. 当工作节点首次识别到一个大文件(默认阈值为1 GB)时,将启动在Amazon EC2上运行的分段上传任务。然后将相应的 UploadId 传递给 Step Functions,触发一个定期的重复任务。此 Step Functions 会每隔1分钟进行周期性检查,以验证与 UploadId 相关的分布式分片是否已在整个集群内成功传输。 +10. 如果所有分片都成功传输,Amazon EC2 将调用 Amazon S3 的 CompleteMultipartUpload API 来完成分片的合并。如果发现任何分片无效,它们将被丢弃。 !!! note "注意" - 如果对象(或对象的一部分)传输失败,JobWorker释放队列中的消息,待消息在队列中可见后再次传输对象(默认可见性超时设置为15分钟)。如果传输再次失败,消息将发送到死信队列,同时还将发送通知警报。 + 如果对象(或对象的一部分)传输失败,JobWorker释放队列中的消息,待消息在队列中可见后再次传输对象(默认可见性超时设置为15分钟)。如果传输失败达到 5 次,消息将发送到死信队列,同时还将发送通知警报。 ## Amazon ECR插件 diff --git a/docs/zh-base/faq.md index 01d5f50..a8fb864 100644 --- a/docs/zh-base/faq.md +++ b/docs/zh-base/faq.md @@ -13,8 +13,8 @@ 如果客户想在中国区域部署,但是没有域名,可以直接部署后端版本: -- Amazon S3 Plugin: [https://github.com/awslabs/amazon-s3-data-replication-hub-plugin](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin) -- Amazon ECR Plugin: [https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin) +- Amazon S3 Plugin: [https://github.com/awslabs/data-transfer-hub/blob/main/docs/S3_PLUGIN.md](https://github.com/awslabs/data-transfer-hub/blob/main/docs/S3_PLUGIN.md) +- Amazon ECR Plugin: [https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md](https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md) **3. 是否需要分别在数据源和目标端各部署一次数据传输解决方案?**

@@ -178,11 +178,11 @@ Auto Scaling Group 的大小会根据 SQS 中的任务数量[自动放大或缩 - 以下是所有EC2实例的日志组,您可以找到详细的传输日志。 - `-EC2WorkerStackS3RepWorkerLogGroup` + `-CommonS3RepWorkerLogGroup` **4. 如何进行自定义更改?**
-如果要对此插件进行自定义更改,请参阅[自定义构建](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin/blob/main/docs/CUSTOM_BUILD.md)。 +如果要对此插件进行自定义更改,请参阅[自定义构建](https://github.com/awslabs/data-transfer-hub/blob/main/CUSTOM_BUILD.md)。 **5. 部署完成后,为什么在两个CloudWatch日志组中找不到任何日志流?**
diff --git a/docs/zh-base/images/s3-arch-global.png b/docs/zh-base/images/s3-arch-global.png index 855b408..4891441 100644 Binary files a/docs/zh-base/images/s3-arch-global.png and b/docs/zh-base/images/s3-arch-global.png differ diff --git a/docs/zh-base/solution-overview/index.md b/docs/zh-base/index.md similarity index 75% rename from docs/zh-base/solution-overview/index.md rename to docs/zh-base/index.md index 8e09952..4b00265 100644 --- a/docs/zh-base/solution-overview/index.md +++ b/docs/zh-base/index.md @@ -6,11 +6,11 @@ | 如果您想要... | 请阅读... | |----------|--------| -| 了解运行此解决方案的成本 | [成本](../plan-deployment/cost) | -| 理解此解决方案的安全注意事项 | [安全](../plan-deployment/security) | -| 知道如何为此解决方案计划配额 | [配额](../plan-deployment/quotas) | -| 知道此解决方案支持哪些 AWS 区域 | [支持的 AWS 区域](../plan-deployment/regions) | -| 查看或下载此解决方案中包含的 AWS CloudFormation 模板以自动部署此解决方案的基础设施资源(“堆栈”) | [AWS CloudFormation 模板](../deployment/template) | +| 了解运行此解决方案的成本 | [成本](./plan-deployment/cost) | +| 理解此解决方案的安全注意事项 | [安全](./plan-deployment/security) | +| 知道如何为此解决方案计划配额 | [配额](./plan-deployment/quotas) | +| 知道此解决方案支持哪些 AWS 区域 | [支持的 AWS 区域](./plan-deployment/regions) | +| 查看或下载此解决方案中包含的 AWS CloudFormation 模板以自动部署此解决方案的基础设施资源(“堆栈”) | [AWS CloudFormation 模板](./deployment/template) | 本指南适用于在 AWS 云中进行实际架构工作的 IT 架构师、开发人员、DevOps、数据分析师和市场技术专业人员。 diff --git a/docs/zh-base/plan-deployment/cost.md b/docs/zh-base/plan-deployment/cost.md index 9d5edc2..56f0726 100644 --- a/docs/zh-base/plan-deployment/cost.md +++ b/docs/zh-base/plan-deployment/cost.md @@ -15,7 +15,7 @@ - 每个EC2实例运行的平均速度: 1GB/min - EC2实例运行的总时长: 17小时 - 截至2023年4月,使用解决方案完成该传输任务的成本费用如下表所示: + 截至本次发布版本,使用解决方案完成该传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| @@ -35,7 +35,7 @@ - 每个EC2实例运行的平均速度: 约6MB/分钟(约每秒10个文件) - EC2实例运行的总时长: 约3000小时 -截至2023年4月,使用解决方案完成传输任务的成本费用如下表所示: +截至本次发布版本,使用解决方案完成传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| @@ -55,7 +55,7 @@ 将27个Amazon ECR镜像(总大小约3GB)从欧洲(爱尔兰)区域(eu-west-1)传输到由光环新网运营的亚马逊云科技中国(北京)区域(cn-north-1),运行总时长约为6分钟。 -截至2023年4月,使用解决方案完成传输任务的成本费用如下表所示: +截至本次发布版本,使用解决方案完成传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| diff --git a/docs/zh-base/revisions.md b/docs/zh-base/revisions.md index 369e942..dd13dbd 100644 --- a/docs/zh-base/revisions.md +++ b/docs/zh-base/revisions.md @@ -5,4 +5,5 @@ | 2021年12月 | 发布版本2.1
1. 支持自定义Prefix列表过滤传输任务
2. 支持配置单次运行的文件传输任务
3. 支持通过自定义CRON表达式配置任务时间表
4. 支持自定义开启或关闭数据比对功能 | | 2022年7月 | 发布版本2.2
1. 支持通过 Direct Connect 传输数据| | 2023年3月 | 发布版本2.3
1. 支持嵌入式仪表板和监控日志
2. 支持S3 Access Key 自动轮换
3. 增强一次性传输任务监控| -| 2023年4月 | 发布版本2.4
1. 支持请求者付费模式| \ No newline at end of file +| 2023年4月 | 发布版本2.4
1. 支持请求者付费模式| +| 2023年9月 | 发布版本2.5
1. 通过利用集群的并行能力,提升了Amazon S3大文件传输的性能
2. 启用了未标记的ECR镜像传输
3. 优化了停止任务操作,并添加了新的筛选条件以查看所有历史任务 | \ No newline at end of file diff --git a/docs/zh-base/update.md b/docs/zh-base/update.md index 5009a32..4a9abe6 100644 --- a/docs/zh-base/update.md +++ b/docs/zh-base/update.md @@ -37,19 +37,8 @@ ## 第 2 步.(可选)更新 OIDC 配置 -如果您已经在中国区结合 OIDC 部署了该方案,请参考[部署](../deployment/#1oidc)章节更新 OIDC 中的授权、认证配置。 +如果您已经在中国区结合 OIDC 部署了该方案,请参考[部署](deployment/deployment.md#launch-openid)章节更新 OIDC 中的授权、认证配置。 - -## 步骤 3. 在 CloudFront 创建 CDN 刷新 - -CloudFront 已在其边缘节点缓存旧版本的 Data Transfer Hub 控制台。 我们需要在 CloudFront 控制台上创建一个失效(invalidation)以强制删除缓存。 - -1. 登录 [AWS CloudFront 控制台](https://console.aws.amazon.com/cloudfront/){target='_blank'}。 - -2. 选择并点击 Data Transfer Hub 的分配。 其说明类似于 `SolutionName - Web Console Distribution (RegionName)`。 - -3. 在**失效**界面,点击**创建失效**,并以 `/*` 路径创建一个失效。 - -## 步骤 4. 刷新网页控制台 +## 步骤 3. 刷新网页控制台 现在您已完成所有升级步骤。 请点击浏览器中的**刷新**按钮。 \ No newline at end of file diff --git a/docs/zh-base/user-guide/tutorial-s3.md b/docs/zh-base/user-guide/tutorial-s3.md index 243011f..4fa4f05 100644 --- a/docs/zh-base/user-guide/tutorial-s3.md +++ b/docs/zh-base/user-guide/tutorial-s3.md @@ -12,7 +12,7 @@ - 如果您需要实现实时的增量数据同步,请配置是否启用S3事件通知。注意,只有当该方案和您的数据源部署在同一个账户的同一个区域内时,方可配置该选项。 - 如果您不启用S3事件通知,该方案会按照您在后续所配置的调度频率来定期实现增量数据的同步。 - 如果数据源桶不在方案部署的账户中,请选择**No**,然后指定源存储桶的凭证。 - - 如果您选择同步多个前缀的对象,请将以换行为分隔的前缀列表文件传输到数据源桶的根目录下,然后填写该文件的名称。具体可参考[多前缀列表配置教程](https://github.com/awslabs/amazon-s3-data-replication-hub-plugin/blob/r2_1/docs/USING_PREFIX_LIST_CN.md)。 + - 如果您选择同步多个前缀的对象,请将以换行为分隔的前缀列表文件传输到数据源桶的根目录下,然后填写该文件的名称。具体可参考[多前缀列表配置教程](https://github.com/awslabs/data-transfer-hub/blob/main/docs/USING_PREFIX_LIST_CN.md)。 5. 要创建凭证信息,请选择[Secrets Manager](https://console.aws.amazon.com/secretsmanager/home)以跳转到当前区域的AWS Secrets Manager控制台。 - 从左侧菜单中,选择**密钥**,然后选择**储存新的密钥**并选择**其他类型的密钥**类型。 - 根据下面的格式在Plaintext输入框中填写`access_key_id`和`secret_access_key`信息。有关更多信息,请参阅*IAM用户指南*中的[IAM功能](https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html)。选择**下一步**。 diff --git a/source/constructs/bin/cdk-solution.ts b/source/constructs/bin/cdk-solution.ts index d8da2a8..bd49cfe 100755 --- a/source/constructs/bin/cdk-solution.ts +++ b/source/constructs/bin/cdk-solution.ts @@ -1,35 +1,94 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 -import 'source-map-support/register'; +import "source-map-support/register"; import { App, Aspects, Stack } from "aws-cdk-lib"; -import { ConstructsStack } from '../lib/constructs-stack'; +import { ConstructsStack, AuthType } from "../lib/constructs-stack"; +import { DataTransferS3Stack } from "../lib/s3-plugin/s3-plugin-stack"; +import { DataTransferECRStack } from "../lib/ecr-plugin/ecr-plugin-stack"; import { - AwsSolutionsChecks, - NagPackSuppression, - NagSuppressions, + AwsSolutionsChecks, + NagPackSuppression, + NagSuppressions } from "cdk-nag"; const app = new App(); function stackSuppressions( - stacks: Stack[], - suppressions: NagPackSuppression[] + stacks: Stack[], + suppressions: NagPackSuppression[] ) { - stacks.forEach((s) => - NagSuppressions.addStackSuppressions(s, suppressions, true) - ); + stacks.forEach((s) => + NagSuppressions.addStackSuppressions(s, suppressions, true) + ); } -stackSuppressions([ - new ConstructsStack(app, 'DataTransferHub'), -], [ - { id: 'AwsSolutions-IAM5', reason: 'some policies need to get dynamic resources' }, - { id: 'AwsSolutions-IAM4', reason: 'these policies is used by CDK Customer Resource lambda' }, - { id: 'AwsSolutions-SF2', reason: 'we do not need xray' }, - { id: 'AwsSolutions-S1', reason: 'these buckets dont need access log', }, - { id: 'AwsSolutions-S10', reason: 'these buckets dont need SSL', }, - { id: 'AwsSolutions-L1', reason: 'not applicable to use the latest lambda runtime version' }, -]); +stackSuppressions( + [ + new ConstructsStack(app, "DataTransferHub-cognito", { + authType: AuthType.COGNITO + }), + new ConstructsStack(app, "DataTransferHub-openid", { + authType: AuthType.OPENID + }) + ], + [ + { + id: "AwsSolutions-IAM5", + reason: "some policies need to get dynamic resources" + }, + { + id: "AwsSolutions-IAM4", + reason: "these policies is used by CDK Customer Resource lambda" + }, + { id: "AwsSolutions-S1", reason: "these buckets dont need access log" }, + { id: "AwsSolutions-S10", reason: "these buckets dont need SSL" }, + { + id: "AwsSolutions-L1", + reason: "not applicable to use the latest lambda runtime version" + } + ] +); -Aspects.of(app).add(new AwsSolutionsChecks()); \ No newline at end of file +stackSuppressions( + [new DataTransferS3Stack(app, "DataTransferS3Stack")], + [ + { + id: "AwsSolutions-IAM5", + reason: "some policies need to get dynamic resources" + }, + { + id: "AwsSolutions-IAM4", + reason: "these policies is used by CDK Customer Resource lambda" + }, + { + id: "AwsSolutions-L1", + reason: + "not applicable to use the latest lambda runtime version for aws cdk cr" + } + ] +); + +stackSuppressions( + [new DataTransferECRStack(app, "DataTransferECRStack")], + [ + { + id: "AwsSolutions-IAM5", + reason: "some policies need to get dynamic resources" + }, + { + id: "AwsSolutions-IAM4", + reason: "these policies is used by CDK Customer Resource lambda" + }, + { + id: "AwsSolutions-L1", + reason: "not applicable to use the latest lambda runtime version" + }, + { + id: "AwsSolutions-ECS2", + reason: "We need to create a dynamic ECS Service" + } + ] +); + +Aspects.of(app).add(new AwsSolutionsChecks()); diff --git a/source/constructs/cdk.json b/source/constructs/cdk.json index 576bc18..6dd15ab 100755 --- a/source/constructs/cdk.json +++ b/source/constructs/cdk.json @@ -1,6 +1,7 @@ { - "app": "npx ts-node bin/cdk-solution.ts", + "app": "npx ts-node --prefer-ts-exts bin/cdk-solution.ts", "context": { - "@aws-cdk/core:newStyleStackSynthesis": true + 
"@aws-cdk/core:newStyleStackSynthesis": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false } } diff --git a/source/constructs/ecr/Dockerfile b/source/constructs/ecr/Dockerfile new file mode 100644 index 0000000..8784c5c --- /dev/null +++ b/source/constructs/ecr/Dockerfile @@ -0,0 +1,28 @@ +FROM alpine:edge + +RUN apk update \ + && apk add skopeo aws-cli-v2 jq + +WORKDIR /drh + +ENV IMAGE alpine +ENV TAG latest +ENV MULTI_ARCH_OPTION all +ENV SOURCE_TYPE Amazon_ECR +ENV AWS_DEFAULT_REGION us-west-2 +ENV AWS_ACCOUNT_ID 111111111111 + +ENV SRC_REGION us-west-2 +ENV SRC_ACCOUNT_ID 222222222222 +ENV SRC_CREDENTIAL src +ENV SRC_CREDENTIAL_NAME srcNmae + +ENV DEST_REGION cn-north-1 +ENV DEST_ACCOUNT_ID 123456789012 +ENV DEST_PREFIX '' +ENV DEST_CREDENTIAL dest +ENV DEST_CREDENTIAL_NAME desNmae + +COPY copy.sh . +RUN chmod +x copy.sh +CMD ["sh", "copy.sh"] \ No newline at end of file diff --git a/source/constructs/ecr/copy.sh b/source/constructs/ecr/copy.sh new file mode 100755 index 0000000..0bcb577 --- /dev/null +++ b/source/constructs/ecr/copy.sh @@ -0,0 +1,119 @@ +#!/bin/sh + +set -e + +MULTI_ARCH_OPTION=${MULTI_ARCH_OPTION:-system} + +echo "[aws version]: 001" +aws --version + +echo "[Init] Get Image Repo Name and Tag" +echo "repo is $IMAGE and tag is $TAG" +echo "multi arch option is $MULTI_ARCH_OPTION" + +echo "[Init] Get Secrets Manager info" + +get_aksk() +{ + echo "Get AK/SK in Secrets Manager" + if [ -z "$1" ]; then + echo "No credential is provided, no ak/sk" + ak='0' + sk='0' + else + echo "Get $1 from $AWS_DEFAULT_REGION" + cred_secret_manager=$(aws secretsmanager get-secret-value --secret-id $1 --version-stage AWSCURRENT) + ak=$(echo $cred_secret_manager | jq -c '.SecretString | fromjson | .access_key_id' | tr -d '"') + sk=$(echo $cred_secret_manager | jq -c '.SecretString | fromjson | .secret_access_key' | tr -d '"') + fi +} + +get_aksk $SRC_CREDENTIAL_NAME +src_ak=$ak +src_sk=$sk +get_aksk $DEST_CREDENTIAL_NAME +dest_ak=$ak +dest_sk=$sk + +# function to get ecr login password +# Usage: get_cred region account_id ak sk +get_cred() +{ + # echo "All params are $@" + if [ -z "$4" ]; then + # In current account + echo "Get login pwd in region $1 in current account" + cred=`aws ecr get-login-password --region $1` + ACCOUNT_ID=$AWS_ACCOUNT_ID + else + ACCOUNT_ID=$2 + echo "Read AK/SK" + # echo $3 + # echo $4 + # export AWS_ACCESS_KEY_ID=$3 + # export AWS_SECRET_ACCESS_KEY=$4 + echo "Get login pwd in region $1" + cred=$(AWS_ACCESS_KEY_ID=$3 AWS_SECRET_ACCESS_KEY=$4 AWS_DEFAULT_REGION=$1 aws ecr get-login-password --region $1) + # cred=$(AWS_ACCESS_KEY_ID=$3 AWS_SECRET_ACCESS_KEY=$4 AWS_DEFAULT_REGION=$1 aws ecr get-login-password --region $1 --endpoint-url https://vpce-0a7c7079ec6a7548e-ue0esgj8.api.ecr.cn-north-1.vpce.amazonaws.com.cn) + + # echo "cred is $cred" + fi + + # Get ecr domain name + if [ "$1" = "cn-north-1" ] || [ "$1" = "cn-northwest-1" ]; then + domain=$ACCOUNT_ID.dkr.ecr.$1.amazonaws.com.cn + # domain=vpce-00692c5c78d9f56d7-nxeny3cq.dkr.ecr.cn-north-1.vpce.amazonaws.com.cn + else + domain=$ACCOUNT_ID.dkr.ecr.$1.amazonaws.com + fi + echo "domain is $domain" +} + + +echo "[Source] Get Source Info" +if [ "$SOURCE_TYPE" = "Amazon_ECR" ]; then + echo "Source Type is ECR" + get_cred $SRC_REGION $SRC_ACCOUNT_ID $src_ak $src_sk + src_cred=$cred + src_domain=$domain + # echo "src_cred is $src_cred" + echo "src_domain is $src_domain" +else + echo "Source Type is NOT Amazon ECR" +fi + + +echo "[Destination] Get Destination Info" + +get_cred $DEST_REGION 
$DEST_ACCOUNT_ID $dest_ak $dest_sk +dest_cred=$cred +dest_domain=$domain + +# echo "dest_cred is $dest_cred" +echo "dest_domain is $dest_domain" + +echo "[Destination] Create ECR repo" +# echo "Create ecr repo $IMAGE" +if [ -n "$DEST_ACCOUNT_ID" ]; then + echo "Set env" + export AWS_ACCESS_KEY_ID=$dest_ak + export AWS_SECRET_ACCESS_KEY=$dest_sk + export AWS_DEFAULT_REGION=$DEST_REGION +fi +aws ecr create-repository --repository-name $IMAGE --region $DEST_REGION >/dev/null || true + + +echo "[Copy] Start copying" +start_time=$(date +%s) + + +# echo $dest_pwd | skopeo login --username AWS --password-stdin $dest_domain +if [ "$SOURCE_TYPE" = "Amazon_ECR" ]; then + skopeo copy docker://$src_domain/$IMAGE:$TAG docker://$dest_domain/$IMAGE:$TAG --src-creds AWS:$src_cred --dest-creds AWS:$dest_cred --multi-arch $MULTI_ARCH_OPTION +else + skopeo copy docker://$IMAGE:$TAG docker://$dest_domain/$IMAGE:$TAG --dest-creds AWS:$dest_cred --multi-arch $MULTI_ARCH_OPTION +fi + +end_time=$(date +%s) +cost_time=`expr $end_time - $start_time` +echo "Time elapsed to copy is $(($cost_time/60))min $(($cost_time%60))s" diff --git a/source/constructs/lambda/api/api-task.ts b/source/constructs/lambda/api/api-task.ts deleted file mode 100644 index 2c12fcf..0000000 --- a/source/constructs/lambda/api/api-task.ts +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import * as AWS from "aws-sdk"; -import { Context } from "aws-lambda"; -import { Task, CreateTaskInput, UpdateTaskInput, assert, pprint, CommonTaskProgress, makeid, } from '../common'; -import { v4 as uuidv4 } from 'uuid'; - -type CreateTaskInputArg = { - input: CreateTaskInput -} - -type UpdateTaskInputArg = { - id: string, - input: UpdateTaskInput -} - -type StopTaskInputArg = { - id: string -} - -type UpdateTaskProgressArg = { - id: string - input: CommonTaskProgress -} - -interface AppSyncEvent { - info: { - fieldName: string - parentTypeName: string - variables: any - } - arguments: CreateTaskInputArg | UpdateTaskInputArg | StopTaskInputArg | UpdateTaskProgressArg -} - -/** - * Create a transfer task. - * - * @param event Task input parameters. 
{ type: TaskType, parameters: [{ ParameterKey: String, ParameterValue: String }] } - * @param _context - */ -const handler = async function (event: AppSyncEvent, _context?: Context) { - assert(process.env.AWS_REGION !== undefined, 'NO AWS_REGION') - // pprint('EVENT', event) - // pprint('CONTEXT', _context) - - switch (event.info.fieldName) { - case ('createTask'): { - assert('input' in event.arguments, 'No input filed') - assert('type' in event.arguments.input, 'No input.type field') - return await createTask(event.arguments.input) - } - case 'stopTask': { - assert('id' in event.arguments, 'No id field') - return await stopTask(event.arguments.id) - } - case 'updateTaskProgress': - assert('id' in event.arguments, 'No id field') - assert('input' in event.arguments, 'no input') - const arg = event.arguments as UpdateTaskProgressArg - return await updateTaskProgress(arg.id, arg.input) - default: - throw new Error('Unknown field, unable to resolve ' + event.info.fieldName) - } - -} - -async function updateTaskProgress(taskId: string, progress: CommonTaskProgress) { - assert(process.env.TASK_TABLE !== undefined, 'no environment variable TASK_TABLE') - const ddb = new AWS.DynamoDB.DocumentClient({ - region: process.env.AWS_REGION - }); - const updateTaskRes = await ddb.update({ - TableName: process.env.TASK_TABLE, - Key: { - id: taskId - }, - UpdateExpression: 'set progressInfo = :progressInfo', - ExpressionAttributeValues: { - ':progressInfo': progress - }, - ReturnValues: "ALL_NEW" - }).promise() - - pprint('updateTaskRes', updateTaskRes) - return updateTaskRes.Attributes as Task -} - -async function stopTask(taskId: string) { - assert(process.env.STATE_MACHINE_ARN !== undefined, 'no environment variable STATE_MACHINE_ARN found') - assert(process.env.TASK_TABLE !== undefined, 'no environment variable TASK_TABLE') - const isDryRun = process.env.DRY_RUN == 'True' - const sfn = new AWS.StepFunctions({ - region: process.env.AWS_REGION - }); - const ddb = new AWS.DynamoDB.DocumentClient({ - region: process.env.AWS_REGION - }); - - const taskList = await ddb.query({ - TableName: process.env.TASK_TABLE, - KeyConditionExpression: 'id = :id', - ExpressionAttributeValues: { - ':id': taskId - } - }).promise() - - assert(taskList.Items && taskList.Items[0], `Cannot find the task with id ${taskId}`) - const task = taskList.Items[0] - - const sfnRes = !isDryRun ? await sfn.startExecution({ - stateMachineArn: process.env.STATE_MACHINE_ARN, - input: JSON.stringify({ ...task, action: 'STOP' }) // Add action STOP - }).promise() : { - executionArn: `dry-run-execution-arn-${makeid(10)}` - } - pprint('StepFunctions Res', sfnRes) - - const updateTaskRes = await ddb.update({ - TableName: process.env.TASK_TABLE, - Key: { - id: taskId - }, - UpdateExpression: 'set executionArn = :executionArn, progress = :progress', - ExpressionAttributeValues: { - ':executionArn': sfnRes.executionArn, - ':progress': 'STOPPING' - }, - ReturnValues: "ALL_NEW" - }).promise() - - pprint('updatedTaskRes', updateTaskRes) - - return updateTaskRes.Attributes as Task -} - -/** - * Create a transfer Task. The return is an Task object. 
- * @param input - */ -async function createTask(input: CreateTaskInput) { - const sfn = new AWS.StepFunctions({ - region: process.env.AWS_REGION - }); - const ddb = new AWS.DynamoDB.DocumentClient({ - region: process.env.AWS_REGION - }); - assert('type' in input, 'No input.type field') - assert(process.env.STATE_MACHINE_ARN !== undefined, 'no environment variable STATE_MACHINE_ARN found') - assert(process.env.TASK_TABLE !== undefined, 'no environment variable TASK_TABLE') - const pluginTemplateUrl = process.env[`PLUGIN_TEMPLATE_${input.type.toUpperCase()}`] - assert(pluginTemplateUrl !== undefined, `No environment variable PLUGIN_TEMPLATE_${input.type.toUpperCase()}`) - const isDryRun = process.env.DRY_RUN == 'True' - - const task = { - ...input, // task object - id: uuidv4(), // id filed in DynamoDB - templateUrl: pluginTemplateUrl, - createdAt: new Date().toISOString() - } - - // Start to execute Steps Functions for CloudFormation template provisioning - const sfnRes = !isDryRun ? await sfn.startExecution({ - stateMachineArn: process.env.STATE_MACHINE_ARN, - input: JSON.stringify({ ...task, action: 'START' }) // Add action START - }).promise() : { - executionArn: `try-run-execution-arn-${makeid(5)}` - } - pprint('StepFunctions Res', sfnRes) - - const item: Task = { ...task, executionArn: sfnRes.executionArn } - pprint('Item to insert', item) - - await ddb.put({ - TableName: process.env.TASK_TABLE, - Item: item - }).promise() - - return item -} - - -export { handler, AppSyncEvent } \ No newline at end of file diff --git a/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py b/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py index 226f3a4..27db3e1 100644 --- a/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py +++ b/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py @@ -26,7 +26,7 @@ def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) - task_id = event.get("id") + task_id = event["id"]["S"] input_data = { "arguments": { diff --git a/source/constructs/lambda/api/task-monitoring/test/test_check_sqs_status.py b/source/constructs/lambda/api/task-monitoring/test/test_check_sqs_status.py index a5b3c39..1e3c8f9 100644 --- a/source/constructs/lambda/api/task-monitoring/test/test_check_sqs_status.py +++ b/source/constructs/lambda/api/task-monitoring/test/test_check_sqs_status.py @@ -3,85 +3,9 @@ import pytest import os +import json import boto3 -from moto import mock_dynamodb, mock_sqs, mock_cloudformation, mock_sts - -task_info_1 = { - "id": "0ff94440-331e-4678-a53c-768c6720db55", - "createdAt": "2022-07-23T14:49:11.524Z", - "description": "", - "executionArn": "arn:aws:states:ap-southeast-1:123456789012:execution:APICfnWorkflowCfnDeploymentStateMachineFC154A5B-g5tjKvJSuaqT:6cc7ee05-8618-47d3-bd29-209b18e17310", - "parameters": [ - {"ParameterKey": "srcType", "ParameterValue": "Amazon_S3"}, - {"ParameterKey": "srcEndpoint", "ParameterValue": ""}, - {"ParameterKey": "srcBucket", "ParameterValue": "noexist"}, - {"ParameterKey": "srcPrefix", "ParameterValue": ""}, - {"ParameterKey": "srcPrefixsListFile", "ParameterValue": ""}, - {"ParameterKey": "srcEvent", "ParameterValue": "No"}, - {"ParameterKey": "srcRegion", "ParameterValue": "ap-southeast-1"}, - {"ParameterKey": "srcInCurrentAccount", "ParameterValue": "false"}, - {"ParameterKey": "srcCredentials", "ParameterValue": "qiniu-key"}, - {"ParameterKey": "destBucket", "ParameterValue": "dth-us-west-2"}, - {"ParameterKey": "destPrefix", 
"ParameterValue": ""}, - {"ParameterKey": "destStorageClass", "ParameterValue": "STANDARD"}, - {"ParameterKey": "destRegion", "ParameterValue": "us-west-2"}, - {"ParameterKey": "destInCurrentAccount", "ParameterValue": "true"}, - {"ParameterKey": "destCredentials", "ParameterValue": ""}, - {"ParameterKey": "includeMetadata", "ParameterValue": "false"}, - {"ParameterKey": "destAcl", "ParameterValue": "bucket-owner-full-control"}, - {"ParameterKey": "ec2CronExpression", "ParameterValue": "0 */1 ? * * *"}, - {"ParameterKey": "maxCapacity", "ParameterValue": "20"}, - {"ParameterKey": "minCapacity", "ParameterValue": "0"}, - {"ParameterKey": "desiredCapacity", "ParameterValue": "0"}, - {"ParameterKey": "srcSkipCompare", "ParameterValue": "false"}, - {"ParameterKey": "finderDepth", "ParameterValue": "0"}, - {"ParameterKey": "finderNumber", "ParameterValue": "1"}, - {"ParameterKey": "ecsFargateMemory", "ParameterValue": "8192"}, - {"ParameterKey": "workerNumber", "ParameterValue": "4"}, - {"ParameterKey": "alarmEmail", "ParameterValue": "xxxxxx"}, - {"ParameterKey": "ecsVpcId", "ParameterValue": "vpc-0f96aaaa8a4f5c38d"}, - { - "ParameterKey": "ecsClusterName", - "ParameterValue": "DataTransferHub-TaskCluster-l77WIQA2Y4ps", - }, - { - "ParameterKey": "ecsSubnets", - "ParameterValue": "subnet-0e416ad949f9b4250,subnet-08fdf96eeb87e9c0d", - }, - ], - "progress": "IN_PROGRESS", - "stackId": "arn:aws:cloudformation:ap-southeast-1:123456789012:stack/DTH-S3EC2-sKKUJ/9dd81670-0a96-11ed-9cc1-021ceb982872", - "stackOutputs": [ - { - "Description": "Task Definition Name", - "OutputKey": "ECSStackTaskDefinitionNameE8E07C57", - "OutputValue": "DTH-S3EC2-sKKUJ-DTHFinderTask", - }, - { - "Description": "Alarm Topic Name", - "OutputKey": "CommonAlarmTopicName54A80B94", - "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferAlarmTopic-1Q0IKPEMJV9JJ", - }, - { - "Description": "Queue Name", - "OutputKey": "CommonQueueNameEB26B1B7", - "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferQueue-lHC3es0HYTJd", - }, - { - "Description": "Dead Letter Queue Name", - "OutputKey": "CommonDLQQueueName98D51C56", - "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferQueueDLQ-u88iddww7XC0", - }, - { - "Description": "DynamoDB Table Name", - "OutputKey": "CommonTableName4099A6E9", - "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferTable-1CT4T8M71I9QA", - }, - ], - "stackStatus": "CREATE_COMPLETE", - "templateUrl": "https://aws-gcr-solutions.s3.amazonaws.com/data-transfer-hub-s3/v2.1.0/DataTransferS3Stack-ec2.template", - "type": "S3EC2", -} +from moto import mock_dynamodb, mock_sqs, mock_cloudformation, mock_sts, mock_stepfunctions @pytest.fixture @@ -95,6 +19,7 @@ def cfn_client(): ) yield + @pytest.fixture def sts_client(): with mock_sts(): @@ -105,6 +30,91 @@ def sts_client(): @pytest.fixture def ddb_client(): with mock_dynamodb(): + task_info_1 = { + "id": "0ff94440-331e-4678-a53c-768c6720db55", + "createdAt": "2022-07-23T14:49:11.524Z", + "description": "", + "executionArn": "arn:aws:states:ap-southeast-1:123456789012:execution:APICfnWorkflowCfnDeploymentStateMachineFC154A5B-g5tjKvJSuaqT:6cc7ee05-8618-47d3-bd29-209b18e17310", + "parameters": [ + {"ParameterKey": "srcType", "ParameterValue": "Amazon_S3"}, + {"ParameterKey": "srcEndpoint", "ParameterValue": ""}, + {"ParameterKey": "srcBucket", "ParameterValue": "noexist"}, + {"ParameterKey": "srcPrefix", "ParameterValue": ""}, + {"ParameterKey": "srcPrefixsListFile", "ParameterValue": ""}, + {"ParameterKey": "srcEvent", "ParameterValue": "No"}, + {"ParameterKey": "srcRegion", "ParameterValue": "ap-southeast-1"}, + 
{"ParameterKey": "srcInCurrentAccount", "ParameterValue": "false"}, + {"ParameterKey": "srcCredentials", "ParameterValue": "qiniu-key"}, + {"ParameterKey": "destBucket", "ParameterValue": "dth-us-west-2"}, + {"ParameterKey": "destPrefix", "ParameterValue": ""}, + {"ParameterKey": "destStorageClass", "ParameterValue": "STANDARD"}, + {"ParameterKey": "destRegion", "ParameterValue": "us-west-2"}, + {"ParameterKey": "destInCurrentAccount", "ParameterValue": "true"}, + {"ParameterKey": "destCredentials", "ParameterValue": ""}, + {"ParameterKey": "includeMetadata", "ParameterValue": "false"}, + {"ParameterKey": "destAcl", + "ParameterValue": "bucket-owner-full-control"}, + {"ParameterKey": "ec2CronExpression", + "ParameterValue": "0 */1 ? * * *"}, + {"ParameterKey": "maxCapacity", "ParameterValue": "20"}, + {"ParameterKey": "minCapacity", "ParameterValue": "0"}, + {"ParameterKey": "desiredCapacity", "ParameterValue": "0"}, + {"ParameterKey": "srcSkipCompare", "ParameterValue": "false"}, + {"ParameterKey": "finderDepth", "ParameterValue": "0"}, + {"ParameterKey": "finderNumber", "ParameterValue": "1"}, + {"ParameterKey": "ecsFargateMemory", "ParameterValue": "8192"}, + {"ParameterKey": "workerNumber", "ParameterValue": "4"}, + {"ParameterKey": "alarmEmail", "ParameterValue": "xxxxxx"}, + {"ParameterKey": "ecsVpcId", + "ParameterValue": "vpc-0f96aaaa8a4f5c38d"}, + { + "ParameterKey": "ecsClusterName", + "ParameterValue": "DataTransferHub-TaskCluster-l77WIQA2Y4ps", + }, + { + "ParameterKey": "ecsSubnets", + "ParameterValue": "subnet-0e416ad949f9b4250,subnet-08fdf96eeb87e9c0d", + }, + ], + "progress": "IN_PROGRESS", + "stackId": "arn:aws:cloudformation:ap-southeast-1:123456789012:stack/DTH-S3EC2-sKKUJ/9dd81670-0a96-11ed-9cc1-021ceb982872", + "stackOutputs": [ + { + "Description": "Task Definition Name", + "OutputKey": "ECSStackTaskDefinitionNameE8E07C57", + "OutputValue": "DTH-S3EC2-sKKUJ-DTHFinderTask", + }, + { + "Description": "Alarm Topic Name", + "OutputKey": "CommonAlarmTopicName54A80B94", + "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferAlarmTopic-1Q0IKPEMJV9JJ", + }, + { + "Description": "Queue Name", + "OutputKey": "CommonQueueNameEB26B1B7", + "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferQueue-lHC3es0HYTJd", + }, + { + "Description": "Dead Letter Queue Name", + "OutputKey": "CommonDLQQueueName98D51C56", + "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferQueueDLQ-u88iddww7XC0", + }, + { + "Description": "DynamoDB Table Name", + "OutputKey": "CommonTableName4099A6E9", + "OutputValue": "DTH-S3EC2-sKKUJ-S3TransferTable-1CT4T8M71I9QA", + }, + { + "Description": "SFN ARN", + "OutputKey": "SfnArn", + "OutputValue": "arn:aws:states:us-east-1:123456789012:stateMachine:TestStateMachine", + }, + ], + "stackStatus": "CREATE_COMPLETE", + "templateUrl": "https://aws-gcr-solutions.s3.amazonaws.com/data-transfer-hub-s3/v2.1.0/DataTransferS3Stack-ec2.template", + "type": "S3EC2", + } + region = os.environ.get("AWS_REGION") ddb = boto3.resource("dynamodb", region_name=region) # Mock App Log Configuration Table @@ -142,7 +152,37 @@ def sqs_client(): yield -def test_lambda_function(cfn_client, ddb_client, sqs_client, sts_client): +@pytest.fixture +def sfn_client(): + with mock_stepfunctions(): + region = os.environ.get("AWS_REGION") + sf_client = boto3.client('stepfunctions', region_name=region) + + state_machine_definition = { + "Comment": "A simple AWS Step Functions state machine example", + "StartAt": "HelloWorld", + "States": { + "HelloWorld": { + "Type": "Pass", + "Result": "Hello, World!", + "End": True + } + } + } + 
definition_json = json.dumps(state_machine_definition) + response = sf_client.create_state_machine( + name="TestStateMachine", + definition=definition_json, + roleArn="arn:aws:iam::123456789012:role/service-role/StepFunctions-HelloWorld-ExecutionRole", + ) + + os.environ["MOCK_SFN_ARN"] = response["stateMachineArn"] + yield + + +def test_lambda_function(cfn_client, sfn_client, ddb_client, sqs_client, sts_client): import check_sqs_status # Create a service linked role in a brand new account diff --git a/source/constructs/lambda/api/task-monitoring/test/test_check_transfer_complete.py index 9ba7440..c1b0d8b 100644 --- a/source/constructs/lambda/api/task-monitoring/test/test_check_transfer_complete.py +++ b/source/constructs/lambda/api/task-monitoring/test/test_check_transfer_complete.py @@ -109,7 +109,7 @@ def clw_client(): MetricData=[ { 'MetricName': 'TransferredObjects', - 'Timestamp': datetime.utcnow(), + 'Timestamp': datetime.utcnow() - timedelta(minutes=1), 'Value': 1, }, ] @@ -158,7 +158,6 @@ def test_lambda_function(cfn_client, clw_client, ddb_client): }, None, ) - print(result) # Expect Execute successfully. assert result == { 'isCompleted': 'true', diff --git a/source/constructs/lambda/api/task-monitoring/test/test_start_monitor_flow.py index efc454d..00f1c36 100644 --- a/source/constructs/lambda/api/task-monitoring/test/test_start_monitor_flow.py +++ b/source/constructs/lambda/api/task-monitoring/test/test_start_monitor_flow.py @@ -28,7 +28,9 @@ def test_lambda_function(sfn_client): # Create a service linked role in a brand new account result = start_monitor_flow.lambda_handler( { - "id": "0ff94440-331e-4678-a53c-768c6720db55" + "id": { + "S": "0ff94440-331e-4678-a53c-768c6720db55" + } }, None, ) diff --git a/source/constructs/lambda/api/task-monitoring/util/monitor_helper.py index 6617cfe..f589afa 100644 --- a/source/constructs/lambda/api/task-monitoring/util/monitor_helper.py +++ b/source/constructs/lambda/api/task-monitoring/util/monitor_helper.py @@ -29,6 +29,7 @@ sts_client = boto3.client('sts', config=default_config) asg_client = boto3.client('autoscaling', config=default_config) sns_client = boto3.client('sns', config=default_config) +sfn_client = boto3.client('stepfunctions', config=default_config) transfer_task_table_name = os.environ.get('TRANSFER_TASK_TABLE') transfer_task_table = dynamodb_resource.Table(transfer_task_table_name) @@ -314,13 +315,14 @@ def __init__(self, task_id): self._stack_name = self.get_stack_name(self.stack_id) self._name_space = self.get_stack_name(self.stack_id) self._sqs_name = self.get_task_attributes(resp, "Queue Name") + self._sfn_arn = self.get_task_attributes(resp, "SFN ARN") self._task_schedule = self.get_task_param(resp, "ec2CronExpression") self._finder_object_count = resp["Item"].get("totalObjectCount") self._worker_asg_name = self._stack_name + "-Worker-ASG" self._stack_create_time = self.get_cloudformation_stack_info( self._stack_name, "CreationTime") - def _get_worker_asg_transsferred_task_count(self): + def _get_worker_asg_transferred_task_count(self): """ This function will return the worker ASG transferred task count. Here we assume that a one-time transfer task will be completed in 60 days (3600 seconds * 1440).
+        if int(message_in_flight) == 0 and int(message_available) == 0 and giant_merging_running_task_count == 0:
             check_round += 1
             return {
                 "isEmpty": "true",
@@ -396,7 +407,7 @@ def check_sqs_empty(self, check_round):
 
     def check_transfer_complete(self):
-        """This function will check the transfer task is completed or not ."""
-        transferred_object_count = self._get_worker_asg_transsferred_task_count()
+        """This function will check whether the transfer task has completed."""
+        transferred_object_count = self._get_worker_asg_transferred_task_count()
 
         # One Time Transfer task completed
         if int(transferred_object_count) == int(self._finder_object_count):
diff --git a/source/constructs/lambda/api/task/api_task_v2.py b/source/constructs/lambda/api/task/api_task_v2.py
index 2849e34..42127ba 100644
--- a/source/constructs/lambda/api/task/api_task_v2.py
+++ b/source/constructs/lambda/api/task/api_task_v2.py
@@ -5,17 +5,31 @@
 import logging
 import os
 import re
-
+import uuid
+import datetime
+from decimal import Decimal
 import boto3
 from boto3.dynamodb.conditions import Attr
-from util.task_helper import TaskErrorHelper
+from botocore import config
+
+from util.task_helper import TaskErrorHelper, make_id
 
 logger = logging.getLogger()
 logger.setLevel(logging.INFO)
 
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8001")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+
+default_region = os.environ.get("AWS_REGION")
 # Get DDB resource.
-dynamodb = boto3.resource('dynamodb') +dynamodb = boto3.resource('dynamodb', config=default_config) +sfn_client = boto3.client('stepfunctions', config=default_config) +ddb_client = boto3.client('dynamodb', config=default_config) transfer_task_table_name = os.environ.get('TRANSFER_TASK_TABLE') default_region = os.environ.get('AWS_REGION') @@ -33,6 +47,12 @@ def lambda_handler(event, _): elif action == "getErrorMessage": task_id = args.get("id") return get_error_message(task_id) + elif action == "createTask": + return create_task(args.get("input")) + elif action == "stopTask": + return stop_task(args.get("id")) + elif action == "updateTaskProgress": + return update_task_progress(args.get("id"), args.get("input")) else: logger.info('Event received: ' + json.dumps(event, indent=2)) raise RuntimeError(f'Unknown action {action}') @@ -128,10 +148,93 @@ def get_stack_schedule_type(task_schedule: str): return "ONE_TIME" return "FIXED_RATE" + def get_task_param(item, param_description): """ Get the task param from ddb """ for stack_param in item.get("parameters"): if stack_param.get("ParameterKey") == param_description: return stack_param.get("ParameterValue") - return "" \ No newline at end of file + return "" + + +def create_task(task_input): + """ Create a transfer task """ + plugin_template_url = os.environ.get( + f"PLUGIN_TEMPLATE_{task_input['type'].upper()}") + is_dry_run = os.environ.get('DRY_RUN') == 'True' + created_at_iso = datetime.datetime.utcnow().isoformat() + 'Z' + task = { + **task_input, + 'id': str(uuid.uuid4()), + 'templateUrl': plugin_template_url, + 'createdAt': created_at_iso + } + + if not is_dry_run: + sfn_res = sfn_client.start_execution( + stateMachineArn=os.environ['STATE_MACHINE_ARN'], + input=json.dumps({**task, 'action': 'START'}) + ) + execution_arn = sfn_res['executionArn'] + else: + execution_arn = f'dry-run-execution-arn-{make_id(5)}' + + item = {**task, 'executionArn': execution_arn} + + transfer_task_table.put_item(Item=item) + + return item + + +def stop_task(task_id): + """ Stop a transfer task """ + is_dry_run = os.environ.get('DRY_RUN') == 'True' + + resp = transfer_task_table.get_item(Key={"id": task_id}) + task = resp['Item'] + + if not is_dry_run: + sfn_res = sfn_client.start_execution( + stateMachineArn=os.environ['STATE_MACHINE_ARN'], + input=json.dumps({**task, 'action': 'STOP'}, + default=decimal_to_float) + ) + execution_arn = sfn_res['executionArn'] + else: + execution_arn = f'dry-run-execution-arn-{make_id(5)}' + + ddb_client.update_item( + TableName=transfer_task_table_name, + Key={'id': {'S': task_id}}, + UpdateExpression='set executionArn = :executionArn, progress = :progress', + ExpressionAttributeValues={':executionArn': { + 'S': execution_arn}, ':progress': {'S': 'STOPPING'}}, + ReturnValues='ALL_NEW' + ) + + task['progress'] = 'STOPPING' + task['executionArn'] = execution_arn + + return task + + +def update_task_progress(task_id, progress): + """ Update a transfer task progress """ + update_task_res = ddb_client.update_item( + TableName=transfer_task_table_name, + Key={'id': {'S': task_id}}, + UpdateExpression='set progressInfo = :progressInfo', + ExpressionAttributeValues={ + ':progressInfo': {'S': json.dumps(progress)}}, + ReturnValues='ALL_NEW' + ) + + return update_task_res['Attributes'] + + +def decimal_to_float(obj): + """ Convert Decimal values to float for JSON serialization """ + if isinstance(obj, Decimal): + return float(obj) + raise TypeError diff --git a/source/constructs/lambda/api/task/util/task_helper.py 
b/source/constructs/lambda/api/task/util/task_helper.py index 057d05d..4e04175 100644 --- a/source/constructs/lambda/api/task/util/task_helper.py +++ b/source/constructs/lambda/api/task/util/task_helper.py @@ -5,6 +5,8 @@ import os import re import boto3 +import random +import string from botocore import config @@ -54,7 +56,7 @@ def get_error_message(self): """ Get the error reason of transfer task """ - if "FAILED" in self._stack_status: + if "FAILED" in self._stack_status or "ROLLBACK" in self._stack_status: err_message = self.get_cfn_stack_first_error_event() err_code = "CFN_ERROR" else: @@ -126,4 +128,11 @@ def get_stack_name(stack_id): else: raise APIException("Error parse stack name.") - return stack_name \ No newline at end of file + return stack_name + + +def make_id(length): + """Generate a random string of fixed length """ + characters = string.ascii_letters + result = ''.join(random.choice(characters) for _ in range(length)) + return result \ No newline at end of file diff --git a/source/constructs/lambda/cdk/.coveragerc b/source/constructs/lambda/cdk/.coveragerc new file mode 100644 index 0000000..bacf8eb --- /dev/null +++ b/source/constructs/lambda/cdk/.coveragerc @@ -0,0 +1,8 @@ +[run] +omit = + tests/* + .venv-*/* + test/* + */__init__.py +source = + . \ No newline at end of file diff --git a/source/constructs/lambda/cdk/cfn-task.ts b/source/constructs/lambda/cdk/cfn-task.ts deleted file mode 100644 index eb54cf7..0000000 --- a/source/constructs/lambda/cdk/cfn-task.ts +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import * as AWS from 'aws-sdk'; -import { Context, } from 'aws-lambda'; -import { CreateTaskInput, pprint, assert, makeid, TaskProgress } from '../common'; - -interface QueryTaskCfnResponse { - stackId: string, - stackStatus: string, - stackStatusReason?: string, - stackOutputs?: any -} - -interface CfnTaskInput extends CreateTaskInput { - templateUrl: string - id: string -} - -interface StopTaskInput { - id: string - stackId: string -} - -interface QueryCfnTaskInput extends CfnTaskInput { - stackId: string -} - -/** - * Create the Task CloudFormation Stack. This function will create an item in provided DynamoDB. - * - * : Environment variable. The name of the DynamoDB table for tasks. 
- * - * @param input - * @param context - */ -exports.createTaskCfn = async function (input: CfnTaskInput, context: Context) { - pprint('INPUT', input) - pprint('CONTEXT', context) - assert(process.env.TASK_TABLE !== undefined, 'No TASK_TABLE env') - assert(process.env.AWS_REGION !== undefined, 'No AWS_REGION env') - - const id = makeid(5) - const cfn = new AWS.CloudFormation(); - const stack = await cfn.createStack({ - TemplateURL: input.templateUrl, - StackName: `DTH-${input.type.toString()}-${id}`, - Parameters: input.parameters, - Capabilities: ['CAPABILITY_NAMED_IAM'], - Tags: [ - { - Key: "TaskId", - Value: id - } - ] - }).promise() - - assert(stack.StackId !== undefined, 'No Stack ID') - - const ddb = new AWS.DynamoDB.DocumentClient({ - region: process.env.AWS_REGION - }) - - const updatedItemRes = await ddb.update({ - TableName: process.env.TASK_TABLE, - Key: { - id: input.id - }, - UpdateExpression: 'set progress = :progress, stackId = :stackId, stackStatus = :stackStatus', - ExpressionAttributeValues: { - ':progress': 'STARTING', - ':stackId': stack.StackId, - ':stackStatus': 'CREATE_IN_PROGRESS' - }, - ReturnValues: "ALL_NEW" - }).promise() - - pprint('updatedItemRes.Attributes', updatedItemRes.Attributes) - return updatedItemRes.Attributes -} - -/** - * Request to delete the CloudFormation Stack. - * - * @param input - */ -exports.stopTaskCfn = async function (input: StopTaskInput) { - assert(process.env.TASK_TABLE !== undefined, 'NO TASK_TABLE env') - pprint('INPUT', input) - - const cfn = new AWS.CloudFormation() - const ddb = new AWS.DynamoDB.DocumentClient() - const deleteStackRes = await cfn.deleteStack({ - StackName: input.stackId - }).promise() - - pprint('deleteStackRes', deleteStackRes) - - const task = await ddb.update({ - TableName: process.env.TASK_TABLE, - Key: { - id: input.id - }, - UpdateExpression: 'set progress = :progress', - ExpressionAttributeValues: { - ':progress': 'STOPPING' - }, - ReturnValues: "ALL_NEW" - }).promise() - - pprint('task', task.Attributes) - - return task.Attributes -} - -/** - * Query the Task CloudFormation Stack status. This function will update the Stack Status in provided DynamoDB. - * - * : Environment variable. The name of the DynamoDB table for tasks. 
- * - * @param input - */ -exports.queryTaskCfn = async function (input: QueryCfnTaskInput) { - assert(process.env.TASK_TABLE !== undefined, 'NO TASK_TABLE env') - pprint('INPUT', input) - - const cfn = new AWS.CloudFormation(); - const ddb = new AWS.DynamoDB.DocumentClient(); - const describeStackResult = await cfn.describeStacks({ - StackName: input.stackId - }).promise() - - if (describeStackResult.Stacks && describeStackResult.Stacks.length > 0) { - const queryResult: QueryTaskCfnResponse = { - stackId: input.stackId, - stackStatus: describeStackResult.Stacks[0].StackStatus, - stackStatusReason: describeStackResult.Stacks[0].StackStatusReason, - stackOutputs: describeStackResult.Stacks[0].Outputs - } - - const getProgress = () => { - const stackStatus = queryResult.stackStatus - switch (stackStatus) { - case 'CREATE_IN_PROGRESS': - return TaskProgress.STARTING - case 'CREATE_COMPLETE': - return TaskProgress.IN_PROGRESS - case 'DELETE_IN_PROGRESS': - return TaskProgress.STOPPING - case 'DELETE_COMPLETE': - return TaskProgress.STOPPED - default: - return 'ERROR' - } - } - - const updatedTask = await ddb.update({ - TableName: process.env.TASK_TABLE, - Key: { - id: input.id - }, - UpdateExpression: 'set stackStatus = :stackStatus, progress = :progress, stackOutputs = :stackOutputs', - ConditionExpression: `stackStatus <> ${queryResult.stackStatus}`, - ExpressionAttributeValues: { - ':stackStatus': queryResult.stackStatus, - ':progress': getProgress(), - ':stackOutputs': queryResult.stackOutputs, - }, - ReturnValues: "ALL_NEW" - }).promise() - - pprint('updatedTask.Attributes', updatedTask.Attributes) - return updatedTask.Attributes - } else { - return new Error(`Query failed, stackId: ${input.stackId}`) - } -} diff --git a/source/constructs/lambda/cdk/lambda_function.py b/source/constructs/lambda/cdk/lambda_function.py new file mode 100644 index 0000000..a841712 --- /dev/null +++ b/source/constructs/lambda/cdk/lambda_function.py @@ -0,0 +1,173 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+
+import boto3
+import uuid
+import os
+import logging
+from botocore import config
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8001")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+
+default_region = os.environ.get("AWS_REGION")
+task_table_name = os.environ.get("TASK_TABLE")
+
+cfn = boto3.client('cloudformation', config=default_config)
+ddb = boto3.client('dynamodb', config=default_config)
+
+
+class TaskProgress:
+    """ Task Progress """
+    STARTING = "STARTING"
+    IN_PROGRESS = "IN_PROGRESS"
+    STOPPING = "STOPPING"
+    STOPPED = "STOPPED"
+
+
+def create_task_cfn(sfn_input, _):
+    """ Create a cfn task """
+    logger.info('INPUT: %s', sfn_input)
+
+    task_id = str(uuid.uuid4())[:5]
+    stack_name = f'DTH-{sfn_input["type"]}-{task_id}'
+    stack = cfn.create_stack(
+        TemplateURL=sfn_input['templateUrl'],
+        StackName=stack_name,
+        Parameters=sfn_input['parameters'],
+        Capabilities=['CAPABILITY_NAMED_IAM'],
+        Tags=[{'Key': "TaskId", 'Value': task_id}]
+    )
+
+    updated_item_res = ddb.update_item(
+        TableName=task_table_name,
+        Key={'id': {'S': sfn_input['id']}},
+        UpdateExpression='set progress = :progress, stackId = :stackId, stackStatus = :stackStatus',
+        ExpressionAttributeValues={
+            ':progress': {'S': 'STARTING'},
+            ':stackId': {'S': stack['StackId']},
+            ':stackStatus': {'S': 'CREATE_IN_PROGRESS'}
+        },
+        ReturnValues="ALL_NEW"
+    )
+
+    logger.info('updatedItemRes.Attributes: %s',
+                updated_item_res['Attributes'])
+    return updated_item_res['Attributes']
+
+
+def stop_task_cfn(sfn_input, _):
+    """ Stop a cfn task """
+    logger.info('INPUT: %s', sfn_input)
+
+    delete_stack_res = cfn.delete_stack(StackName=sfn_input['stackId'])
+
+    logger.info('deleteStackRes: %s', delete_stack_res)
+
+    task = ddb.update_item(
+        TableName=task_table_name,
+        Key={'id': {'S': sfn_input['id']}},
+        UpdateExpression='set progress = :progress',
+        ExpressionAttributeValues={':progress': {'S': 'STOPPING'}},
+        ReturnValues="ALL_NEW"
+    )
+
+    logger.info('task: %s', task['Attributes'])
+    return task['Attributes']
+
+
+def query_task_cfn(sfn_input, _):
+    """ Query a cfn task status """
+    logger.info('INPUT: %s', sfn_input)
+
+    describe_stack_result = cfn.describe_stacks(
+        StackName=sfn_input['stackId']['S'])
+
+    if describe_stack_result['Stacks'] and len(describe_stack_result['Stacks']) > 0:
+        query_result = {
+            'stackId': sfn_input['stackId']['S'],
+            'stackStatus': describe_stack_result['Stacks'][0]['StackStatus'],
+            'stackStatusReason': describe_stack_result['Stacks'][0].get('StackStatusReason', ""),
+            'stackOutputs': describe_stack_result['Stacks'][0].get('Outputs', [])
+        }
+
+        ddb_format_outputs = generate_ddb_format_json(query_result['stackOutputs'])
+        # Only write when the stored status differs from the freshly queried one
+        updated_task = ddb.update_item(
+            TableName=task_table_name,
+            Key={'id': {'S': sfn_input['id']['S']}},
+            UpdateExpression='set stackStatus = :stackStatus, progress = :progress, stackOutputs = :stackOutputs',
+            ConditionExpression='stackStatus <> :stackStatus',
+            ExpressionAttributeValues={
+                ':stackStatus': {'S': query_result['stackStatus']},
+                ':progress': {'S': get_progress(query_result['stackStatus'])},
+                ':stackOutputs': {'L': ddb_format_outputs},
+            },
+            ReturnValues="ALL_NEW"
+        )
+
+        logger.info('updatedTask.Attributes: %s', updated_task['Attributes'])
+        return updated_task['Attributes']
+    else:
+        raise Exception(f'Query failed, stackId: {sfn_input["stackId"]["S"]}')
+
+
+def generate_ddb_format_json(stack_outputs):
+    """ Generate ddb format json
+    Input:
+        stack_outputs: [{
+            'OutputKey': 'string',
+            'OutputValue': 'string',
+            'Description': 'string'
+        }]
+    Output:
+        [{
+            'M': {
+                'OutputKey': {
+                    'S': 'string'
+                },
+                'OutputValue': {
+                    'S': 'string'
+                },
+                'Description': {
+                    'S': 'string'
+                }
+            }
+        }]
+    """
+    ddb_format_outputs = []
+    for stack_output in stack_outputs:
+        ddb_format_outputs.append({
+            'M': {
+                'OutputKey': {
+                    'S': stack_output['OutputKey']
+                },
+                'OutputValue': {
+                    'S': stack_output['OutputValue']
+                },
+                'Description': {
+                    'S': stack_output.get('Description', '')
+                }
+            }
+        })
+    return ddb_format_outputs
+
+
+def get_progress(stack_status):
+    """ Get progress """
+    if stack_status == 'CREATE_IN_PROGRESS':
+        return TaskProgress.STARTING
+    elif stack_status == 'CREATE_COMPLETE':
+        return TaskProgress.IN_PROGRESS
+    elif stack_status == 'DELETE_IN_PROGRESS':
+        return TaskProgress.STOPPING
+    elif stack_status == 'DELETE_COMPLETE':
+        return TaskProgress.STOPPED
+    else:
+        return 'ERROR'
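+
+
+# Input shapes for the three handlers above (inferred from their code; the
+# concrete values are illustrative only, not part of a documented contract):
+#   create_task_cfn: plain JSON task, e.g.
+#       {"id": "...", "type": "S3EC2", "templateUrl": "https://...", "parameters": [...]}
+#   stop_task_cfn:   the stored task, e.g. {"id": "...", "stackId": "arn:aws:cloudformation:..."}
+#   query_task_cfn:  a DynamoDB-typed item, e.g. {"id": {"S": "..."}, "stackId": {"S": "arn:..."}}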
diff --git a/source/constructs/lambda/cdk/test/__init__.py b/source/constructs/lambda/cdk/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/cdk/test/conftest.py b/source/constructs/lambda/cdk/test/conftest.py
new file mode 100644
index 0000000..c2aabc3
--- /dev/null
+++ b/source/constructs/lambda/cdk/test/conftest.py
@@ -0,0 +1,19 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["TRANSFER_TASK_TABLE"] = "dth-task-table"
+    os.environ["TASK_TABLE"] = "dth-task-table"
\ No newline at end of file
diff --git a/source/constructs/lambda/cdk/test/requirements-test.txt b/source/constructs/lambda/cdk/test/requirements-test.txt
new file mode 100644
index 0000000..4b34868
--- /dev/null
+++ b/source/constructs/lambda/cdk/test/requirements-test.txt
@@ -0,0 +1,6 @@
+moto
+pytest
+pytest-cov
+pyyaml
+openapi_spec_validator
+docker
\ No newline at end of file
diff --git a/source/constructs/lambda/cdk/test/test_lambda_function.py b/source/constructs/lambda/cdk/test/test_lambda_function.py
new file mode 100644
index 0000000..eedc5f9
--- /dev/null
+++ b/source/constructs/lambda/cdk/test/test_lambda_function.py
@@ -0,0 +1,659 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import os
+import boto3
+import json
+from moto import mock_dynamodb, mock_cloudformation, mock_s3
+
+
+task_info_1 = {
+    "id": "0ff94440-331e-4678-a53c-768c6720db55",
+    "createdAt": "2022-07-23T14:49:11.524Z",
+    "description": "",
+    "executionArn": "arn:aws:states:ap-southeast-1:123456789012:execution:APICfnWorkflowCfnDeploymentStateMachineFC154A5B-g5tjKvJSuaqT:6cc7ee05-8618-47d3-bd29-209b18e17310",
+    "parameters": [
+        {"ParameterKey": "srcType", "ParameterValue": "Amazon_S3"},
+        {"ParameterKey": "srcEndpoint", "ParameterValue": ""},
+        {"ParameterKey": "srcBucket", "ParameterValue": "noexist"},
+        {"ParameterKey": "srcPrefix", "ParameterValue": ""},
+        {"ParameterKey": "srcPrefixsListFile", "ParameterValue": ""},
+        {"ParameterKey": "srcEvent", "ParameterValue": "No"},
+        {"ParameterKey": "srcRegion", "ParameterValue": "ap-southeast-1"},
+        {"ParameterKey": "srcInCurrentAccount", "ParameterValue": "false"},
+        {"ParameterKey": "srcCredentials", "ParameterValue": "qiniu-key"},
+        {"ParameterKey": "destBucket", "ParameterValue": "dth-us-west-2"},
+        {"ParameterKey": "destPrefix", "ParameterValue": ""},
+        {"ParameterKey": "destStorageClass", "ParameterValue": "STANDARD"},
+        {"ParameterKey": "destRegion", "ParameterValue": "us-west-2"},
+        {"ParameterKey": "destInCurrentAccount", "ParameterValue": "true"},
+        {"ParameterKey": "destCredentials", "ParameterValue": ""},
+        {"ParameterKey": "includeMetadata", "ParameterValue": "false"},
+        {"ParameterKey": "destAcl", "ParameterValue": "bucket-owner-full-control"},
+        {"ParameterKey": "ecsCronExpression", "ParameterValue": "0 */1 ? * * *"},
+        {"ParameterKey": "maxCapacity", "ParameterValue": "20"},
+        {"ParameterKey": "minCapacity", "ParameterValue": "0"},
+        {"ParameterKey": "desiredCapacity", "ParameterValue": "0"},
+        {"ParameterKey": "srcSkipCompare", "ParameterValue": "false"},
+        {"ParameterKey": "finderDepth", "ParameterValue": "0"},
+        {"ParameterKey": "finderNumber", "ParameterValue": "1"},
+        {"ParameterKey": "ecsFargateMemory", "ParameterValue": "8192"},
+        {"ParameterKey": "workerNumber", "ParameterValue": "4"},
+        {"ParameterKey": "alarmEmail", "ParameterValue": "xxxxxx"},
+        {"ParameterKey": "ecsVpcId", "ParameterValue": "vpc-0f96aaaa8a4f5c38d"},
+        {
+            "ParameterKey": "ecsClusterName",
+            "ParameterValue": "DataTransferHub-TaskCluster-l77WIQA2Y4ps",
+        },
+        {
+            "ParameterKey": "ecsSubnets",
+            "ParameterValue": "subnet-0e416ad949f9b4250,subnet-08fdf96eeb87e9c0d",
+        },
+    ],
+    "stackStatus": "CREATE_IN_PROGRESS",
+    "progress": "IN_PROGRESS",
+    "stackId": "arn:aws:cloudformation:ap-southeast-1:123456789012:stack/DTH-S3EC2-sKKUJ/9dd81670-0a96-11ed-9cc1-021ceb982872",
+    "templateUrl": "https://my-assets.s3.amazonaws.com/dth-ec2.template",
+    "type": "S3EC2",
+}
+
+
+@pytest.fixture
+def ddb_client():
+    with mock_dynamodb():
+        region = os.environ.get("AWS_REGION")
+        ddb = boto3.resource("dynamodb", region_name=region)
+        # Mock Transfer Task Table
+        task_table_name = os.environ.get("TRANSFER_TASK_TABLE")
+        transfer_task_table = ddb.create_table(
+            TableName=task_table_name,
+            KeySchema=[{
+                "AttributeName": "id",
+                "KeyType": "HASH"
+            }],
+            AttributeDefinitions=[{
+                "AttributeName": "id",
+                "AttributeType": "S"
+            }],
+            ProvisionedThroughput={
+                "ReadCapacityUnits": 10,
+                "WriteCapacityUnits": 10
+            },
+        )
+        data_list = [task_info_1]
+        with transfer_task_table.batch_writer() as batch:
+            for data in data_list:
+                batch.put_item(Item=data)
+        yield
+
+
+@pytest.fixture
+def cfn_client():
+    with
mock_cloudformation(): + region = os.environ.get("AWS_REGION") + client = boto3.client("cloudformation", region_name=region) + response = client.create_stack( + StackName='DTH-S3EC2-sKKUJ', + TemplateBody='{"Resources": {}}', + ) + os.environ['MOCK_CFN_ID'] = response['StackId'] + yield + + +@pytest.fixture +def s3_client(): + with mock_s3(): + # Dummy CloudFormation Template + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "EXAMPLE_AMI_ID", + "KeyName": "dummy", + "InstanceType": "t2.micro", + "Tags": [ + {"Key": "Description", "Value": "Test tag"}, + {"Key": "Name", "Value": "Name tag for tests"}, + ], + }, + } + }, + } + region = os.environ.get("AWS_REGION") + s3 = boto3.client('s3', region_name=region) + s3.create_bucket(Bucket='my-assets') + s3.put_object(Bucket='my-assets', Key='dth-ec2.template', + Body=json.dumps(dummy_template)) + yield + + +def test_lambda_function_stop(ddb_client, cfn_client): + from lambda_function import stop_task_cfn + + response = stop_task_cfn( + { + "errMessage": "2023/09/08 03:20:33 Error listing objects in destination bucket - operation error S3: ListObjectsV2, https response error StatusCode: 301, RequestID: G409Z7BSZ23H88KY, HostID: klnWHkzuSRrBXl0sK//srXAkXCTvLcZPPys2IwRNcPpocVsGV+dvNK23UwWiKIPVp7/i3ZPZqyI=, api error PermanentRedirect: The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.", + "stackId": "arn:aws:cloudformation:us-west-1:123456789012:stack/DTH-S3EC2-8748c/1fb54110-4df6-11ee-b76a-06061c3a0e9f", + "createdAt": "2023-09-08T03:16:37.609935Z", + "stackOutputs": [ + { + "OutputKey": "CommonSplitPartTableName68CB1187", + "OutputValue": "DTH-S3EC2-8748c-S3SplitPartTable-AX7L3SHNN7V1", + "Description": "Split Part DynamoDB Table Name" + } + ], + "executionArn": "arn:aws:states:us-west-1:123456789012:execution:APICfnWorkflowCfnDeploymentStateMachineFC154A5B-y2TQO51TD4mD:2dd8afe1-0b02-4bb1-9ef9-0f41e5b458c8", + "updatedDt": "2023-09-08T03:20:52Z", + "errCode": "FINDER_ERROR", + "parameters": [ + { + "ParameterValue": "Amazon_S3", + "ParameterKey": "srcType" + } + ], + "progress": "ERROR", + "templateUrl": "https://solutions-features-reference.s3.amazonaws.com/data-transfer-hub/develop/DataTransferS3Stack.template", + "scheduleType": "FIXED_RATE", + "description": "", + "id": "0ff94440-331e-4678-a53c-768c6720db55", + "stackStatus": "CREATE_COMPLETE", + "type": "S3EC2", + "action": "STOP" + }, + None, + ) + + assert "S" in response.get("id") + + +def test_lambda_function_create(ddb_client, cfn_client, s3_client): + from lambda_function import create_task_cfn + + response = create_task_cfn( + { + "type": "S3EC2", + "description": "", + "scheduleType": "FIXED_RATE", + "parameters": [ + { + "ParameterKey": "srcType", + "ParameterValue": "Amazon_S3" + }, + { + "ParameterKey": "srcEndpoint", + "ParameterValue": "" + }, + { + "ParameterKey": "srcBucket", + "ParameterValue": "aaaaa" + }, + { + "ParameterKey": "srcPrefix", + "ParameterValue": "" + }, + { + "ParameterKey": "srcPrefixsListFile", + "ParameterValue": "" + }, + { + "ParameterKey": "srcEvent", + "ParameterValue": "No" + }, + { + "ParameterKey": "srcRegion", + "ParameterValue": "us-east-1" + }, + { + "ParameterKey": "srcInCurrentAccount", + "ParameterValue": "false" + }, + { + "ParameterKey": "srcCredentials", + "ParameterValue": "" + }, + { + "ParameterKey": 
"destBucket", + "ParameterValue": "bbbbb" + }, + { + "ParameterKey": "destPrefix", + "ParameterValue": "" + }, + { + "ParameterKey": "destStorageClass", + "ParameterValue": "INTELLIGENT_TIERING" + }, + { + "ParameterKey": "destRegion", + "ParameterValue": "us-west-2" + }, + { + "ParameterKey": "destInCurrentAccount", + "ParameterValue": "true" + }, + { + "ParameterKey": "destCredentials", + "ParameterValue": "" + }, + { + "ParameterKey": "includeMetadata", + "ParameterValue": "false" + }, + { + "ParameterKey": "isPayerRequest", + "ParameterValue": "false" + }, + { + "ParameterKey": "destAcl", + "ParameterValue": "bucket-owner-full-control" + }, + { + "ParameterKey": "ec2CronExpression", + "ParameterValue": "0 */1 ? * * *" + }, + { + "ParameterKey": "maxCapacity", + "ParameterValue": "0" + }, + { + "ParameterKey": "minCapacity", + "ParameterValue": "0" + }, + { + "ParameterKey": "desiredCapacity", + "ParameterValue": "0" + }, + { + "ParameterKey": "srcSkipCompare", + "ParameterValue": "false" + }, + { + "ParameterKey": "finderDepth", + "ParameterValue": "0" + }, + { + "ParameterKey": "finderNumber", + "ParameterValue": "1" + }, + { + "ParameterKey": "finderEc2Memory", + "ParameterValue": "8" + }, + { + "ParameterKey": "workerNumber", + "ParameterValue": "4" + }, + { + "ParameterKey": "alarmEmail", + "ParameterValue": "xxxxxx" + }, + { + "ParameterKey": "ec2VpcId", + "ParameterValue": "vpc-00435d5729dddd5b6" + }, + { + "ParameterKey": "ec2Subnets", + "ParameterValue": "subnet-0e0402cd5e17375b2,subnet-0dc128ee46fceac91" + } + ], + "id": "0ff94440-331e-4678-a53c-768c6720db55", + "templateUrl": "https://my-assets.s3.amazonaws.com/dth-ec2.template", + "createdAt": "2023-09-08T03:16:37.609935Z", + "action": "START" + }, + None, + ) + + assert "S" in response.get("id") + + +def test_lambda_function_query(ddb_client, cfn_client, s3_client): + from lambda_function import query_task_cfn + + response = query_task_cfn( + { + "stackId": { + "S": os.environ['MOCK_CFN_ID'] + }, + "createdAt": { + "S": "2023-09-08T03:16:37.609935Z" + }, + "stackOutputs": { + "L": [] + }, + "executionArn": { + "S": "arn:aws:states:us-west-1:123456789012:execution:APICfnWorkflowCfnDeploymentStateMachineFC154A5B-y2TQO51TD4mD:2dd8afe1-0b02-4bb1-9ef9-0f41e5b458c8" + }, + "parameters": { + "L": [ + { + "M": { + "ParameterValue": { + "S": "Amazon_S3" + }, + "ParameterKey": { + "S": "srcType" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "srcEndpoint" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "aaaaa" + }, + "ParameterKey": { + "S": "srcBucket" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "srcPrefix" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "srcPrefixsListFile" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "No" + }, + "ParameterKey": { + "S": "srcEvent" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "us-east-1" + }, + "ParameterKey": { + "S": "srcRegion" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "false" + }, + "ParameterKey": { + "S": "srcInCurrentAccount" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "srcCredentials" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "bbbbb" + }, + "ParameterKey": { + "S": "destBucket" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "destPrefix" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "INTELLIGENT_TIERING" + }, + 
"ParameterKey": { + "S": "destStorageClass" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "us-west-2" + }, + "ParameterKey": { + "S": "destRegion" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "true" + }, + "ParameterKey": { + "S": "destInCurrentAccount" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "" + }, + "ParameterKey": { + "S": "destCredentials" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "false" + }, + "ParameterKey": { + "S": "includeMetadata" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "false" + }, + "ParameterKey": { + "S": "isPayerRequest" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "bucket-owner-full-control" + }, + "ParameterKey": { + "S": "destAcl" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "0 */1 ? * * *" + }, + "ParameterKey": { + "S": "ec2CronExpression" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "0" + }, + "ParameterKey": { + "S": "maxCapacity" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "0" + }, + "ParameterKey": { + "S": "minCapacity" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "0" + }, + "ParameterKey": { + "S": "desiredCapacity" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "false" + }, + "ParameterKey": { + "S": "srcSkipCompare" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "0" + }, + "ParameterKey": { + "S": "finderDepth" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "1" + }, + "ParameterKey": { + "S": "finderNumber" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "8" + }, + "ParameterKey": { + "S": "finderEc2Memory" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "4" + }, + "ParameterKey": { + "S": "workerNumber" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "xxxxxx" + }, + "ParameterKey": { + "S": "alarmEmail" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "vpc-00435d5729dddd5b6" + }, + "ParameterKey": { + "S": "ec2VpcId" + } + } + }, + { + "M": { + "ParameterValue": { + "S": "subnet-0e0402cd5e17375b2,subnet-0dc128ee46fceac91" + }, + "ParameterKey": { + "S": "ec2Subnets" + } + } + } + ] + }, + "progress": { + "S": "STARTING" + }, + "templateUrl": { + "S": "https://solutions-features-reference.s3.amazonaws.com/data-transfer-hub/develop/DataTransferS3Stack.template" + }, + "scheduleType": { + "S": "FIXED_RATE" + }, + "description": { + "S": "" + }, + "id": { + "S": "0ff94440-331e-4678-a53c-768c6720db55" + }, + "stackStatus": { + "S": "CREATE_IN_PROGRESS" + }, + "type": { + "S": "S3EC2" + } + }, + None, + ) + assert "S" in response.get("id") diff --git a/source/constructs/lambda/common.ts b/source/constructs/lambda/common.ts deleted file mode 100644 index 3bd5efb..0000000 --- a/source/constructs/lambda/common.ts +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -import { AssertionError } from "assert"; - -interface Task { - id: string, - type: TaskType, - description?: string, - templateUrl: string, - parameters?: Parameter[], - createdAt?: string, - stoppedAt?: string, - progress?: TaskProgress, - progressInfo?: CommonProgressInfo, - stackId?: string, - stackStatus?: string, - stackStatusReason?: string - executionArn?: string -} - -interface CommonProgressInfo { - total?: number, - replicated: number -} - - -enum TaskProgress { - STARTING = 'STARTING', - STOPPING = 'STOPPING', - ERROR = 'ERROR', - IN_PROGRESS = 'IN_PROGRESS', - DONE = 'DONE', - STOPPED = 'STOPPED' -} - -enum TaskType { - S3 = 'S3EC2', - ECR = 'ECR' -} - -enum ScheduleType { - ONE_TIME = 'ONE_TIME', - FIXED_RATE = 'FIXED_RATE' -} - -interface Parameter { - ParameterKey: string, - ParameterValue: string -} - -interface CreateTaskInput { - type: TaskType, - description?: string, - scheduleType: ScheduleType, - parameters?: Parameter[] -} - -interface UpdateTaskInput { - description?: string, - parameters?: Parameter[] -} - -interface CommonTaskProgress { - total?: number - replicated: number -} - -/** - * Assert. - * @param condition - * @param msg Error message - */ -function assert(condition: any, msg?: string): asserts condition { - if (!condition) { - throw new AssertionError({ - message: msg - }); - } -} - -/** - * Pretty Print the JSON object - * @param prefix The keyword before the object - * @param object JSON object to print - */ -function pprint(prefix: string, object: any) { - console.log(prefix + ": \n" + JSON.stringify(object, null, 2)) -} - -/** - * Create a random string id using letters. - * @param length - */ -function makeid(length: number) { - let result = ''; - const characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'; - const charactersLength = characters.length; - for (let i = 0; i < length; i++) { - result += characters.charAt(Math.floor(Math.random() * charactersLength)); - } - return result; -} - -export { Task, TaskType, ScheduleType, Parameter, CreateTaskInput, UpdateTaskInput, CommonTaskProgress, TaskProgress, assert, pprint, makeid } \ No newline at end of file diff --git a/source/constructs/lambda/layer/api/nodejs/package.json b/source/constructs/lambda/layer/api/nodejs/package.json deleted file mode 100644 index dfaa7cd..0000000 --- a/source/constructs/lambda/layer/api/nodejs/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "src", - "version": "2.4.0", - "license": "Apache-2.0", - "author": { - "name": "Amazon Web Services", - "url": "https://aws.amazon.com/solutions" - }, - "dependencies": { - "uuid": "^8.3.0", - "aws-lambda": "^1.0.6", - "aws-sdk": "^2.736.0" - }, - "devDependencies": { - "@types/uuid": "^8.3.0" - } -} diff --git a/source/constructs/lambda/layer/cdk/nodejs/package.json b/source/constructs/lambda/layer/cdk/nodejs/package.json deleted file mode 100644 index dfaa7cd..0000000 --- a/source/constructs/lambda/layer/cdk/nodejs/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "src", - "version": "2.4.0", - "license": "Apache-2.0", - "author": { - "name": "Amazon Web Services", - "url": "https://aws.amazon.com/solutions" - }, - "dependencies": { - "uuid": "^8.3.0", - "aws-lambda": "^1.0.6", - "aws-sdk": "^2.736.0" - }, - "devDependencies": { - "@types/uuid": "^8.3.0" - } -} diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/.coveragerc b/source/constructs/lambda/plugin/ecr/ecr_helper/.coveragerc new file mode 100644 index 0000000..bacf8eb --- /dev/null +++ 
b/source/constructs/lambda/plugin/ecr/ecr_helper/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+omit =
+    tests/*
+    .venv-*/*
+    test/*
+    */__init__.py
+source =
+    .
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/lambda_function.py b/source/constructs/lambda/plugin/ecr/ecr_helper/lambda_function.py
new file mode 100644
index 0000000..4add969
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/ecr_helper/lambda_function.py
@@ -0,0 +1,37 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import os
+import logging
+
+from util.ecr_helper import BaseHelper, ECRHelper
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+
+def lambda_handler(event, _):
+    params = {
+        'source_type': os.environ['SOURCE_TYPE'],
+        'src_list': os.environ['SRC_LIST'],
+        'src_image_list': os.environ['SELECTED_IMAGE_PARAM'],
+        'src_region': os.environ['SRC_REGION'],
+        'src_account_id': os.environ['SRC_ACCOUNT_ID'],
+        'src_credential_name': os.environ['SRC_CREDENTIAL_NAME'],
+        'include_untagged': os.environ.get('INCLUDE_UNTAGGED', 'true'),
+    }
+    logger.info(params)
+
+    result = []
+
+    if params['source_type'] == 'Amazon_ECR':
+        image_helper = ECRHelper(params)
+        result = image_helper.generate_repo_tag_map_list()
+    elif params['source_type'] != 'Amazon_ECR' and params['src_list'] == 'SELECTED':
+        image_helper = BaseHelper(params)
+        result = image_helper.generate_repo_tag_map_list()
+    else:
+        logger.info("Unsupported combination: sourceType=%s with srcList=%s",
+                    params['source_type'], params['src_list'])
+
+    return {"Payload": result}
+
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/test/__init__.py b/source/constructs/lambda/plugin/ecr/ecr_helper/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/test/conftest.py b/source/constructs/lambda/plugin/ecr/ecr_helper/test/conftest.py
new file mode 100644
index 0000000..79f9228
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/ecr_helper/test/conftest.py
@@ -0,0 +1,18 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/test/requirements-test.txt b/source/constructs/lambda/plugin/ecr/ecr_helper/test/requirements-test.txt
new file mode 100644
index 0000000..99cbb53
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/ecr_helper/test/requirements-test.txt
@@ -0,0 +1,4 @@
+moto==3.1.18
+pytest==7.1.2
+pytest-cov==3.0.0
+pyyaml
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/test/test_lambda_function.py b/source/constructs/lambda/plugin/ecr/ecr_helper/test/test_lambda_function.py
new file mode 100644
index 0000000..c85f978
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/ecr_helper/test/test_lambda_function.py
@@ -0,0 +1,219 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import os
+import boto3
+from moto import mock_ecr, mock_ssm
+
+
+@pytest.fixture
+def ecr_client():
+    with mock_ecr():
+        region = os.environ.get("AWS_REGION")
+        ecr = boto3.client("ecr", region_name=region)
+        # create an ECR repository 01
+        repository_name = "test-repository-01"
+        ecr.create_repository(repositoryName=repository_name)
+
+        # upload an image to the above ECR repository
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_01",
+            imageTag="latest"
+        )
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_01",
+            imageTag="v1.3.0"
+        )
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_02",
+            imageTag="v1.2.0"
+        )
+
+        # create an ECR repository 02
+        repository_name = "ubuntu"
+        ecr.create_repository(repositoryName=repository_name)
+
+        # upload an image to the above ECR repository
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_01",
+            imageTag="latest"
+        )
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_02",
+            imageTag="v2.3.0"
+        )
+        ecr.put_image(
+            repositoryName=repository_name,
+            imageManifest="test_03",
+            imageTag="v2.2.0"
+        )
+
+        yield
+
+
+@pytest.fixture
+def ssm_client():
+    with mock_ssm():
+        region = os.environ.get("AWS_REGION")
+        ssm = boto3.client("ssm", region_name=region)
+        ssm.put_parameter(
+            Name="test_ssm_param_name",
+            Value="""
+ubuntu:v2.2.0,
+test-repository-01
+""",
+            Type='String'
+        )
+
+        yield
+
+
+# Test Amazon ECR with all images
+@pytest.fixture
+def env_variables_01():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
+    os.environ["SRC_LIST"] = "ALL"
+    os.environ["SRC_REGION"] = "us-east-1"
+    os.environ["SRC_ACCOUNT_ID"] = ""
+    os.environ["SELECTED_IMAGE_PARAM"] = ""
+    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"
+
+    yield
+
+
+def test_lambda_function_01(ecr_client, env_variables_01):
+    import lambda_function
+
+    response = lambda_function.lambda_handler(
+        {},
+        None,
+    )
+    assert len(response['Payload']) == 6
+
+
+# Test Amazon ECR with selected images
+@pytest.fixture
+def env_variables_02():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
+    os.environ["SRC_LIST"] = "SELECTED"
+    os.environ["SRC_REGION"] = "us-east-1"
+    os.environ["SRC_ACCOUNT_ID"] = ""
+    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name"
+    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"
+
+    yield
+
+
+def test_lambda_function_02(ecr_client, ssm_client, env_variables_02):
+    import lambda_function
+
+    response = lambda_function.lambda_handler(
+        {},
+        None,
+    )
+    assert len(response['Payload']) == 2
+    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0', 'multiArchOption': 'all'},
+                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest', 'multiArchOption': 'all'}]
+
+
+# Test Public repos
+@pytest.fixture
+def env_variables_03():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["SOURCE_TYPE"] = "Public"
+    os.environ["SRC_LIST"] = "SELECTED"
+    os.environ["SRC_REGION"] = "us-east-1"
+    os.environ["SRC_ACCOUNT_ID"] = ""
+    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name"
+    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"
+
+    yield
+
+
+def test_lambda_function_03(ecr_client, ssm_client, env_variables_03):
+    import lambda_function
+
+    response = lambda_function.lambda_handler(
+        {},
+        None,
+    )
+    assert len(response['Payload']) == 2
+    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0', 'multiArchOption': 'all'},
+                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest', 'multiArchOption': 'all'}]
+
+
+# Test Amazon ECR repos with tag ALL_TAGS
+@pytest.fixture
+def env_variables_04():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
+    os.environ["SRC_LIST"] = "SELECTED"
+    os.environ["SRC_REGION"] = "us-east-1"
+    os.environ["SRC_ACCOUNT_ID"] = ""
+    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name_all"
+    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"
+
+    yield
+
+
+@pytest.fixture
+def ssm_client_02():
+    with mock_ssm():
+        region = os.environ.get("AWS_REGION")
+        ssm = boto3.client("ssm", region_name=region)
+        ssm.put_parameter(
+            Name="test_ssm_param_name_all",
+            Value="""
+ubuntu:ALL_TAGS,
+test-repository-01
+""",
+            Type='String'
+        )
+
+        yield
+
+
+def test_lambda_function_04(ecr_client, ssm_client_02, env_variables_04):
+    import lambda_function
+
+    response = lambda_function.lambda_handler(
+        {},
+        None,
+    )
+    assert len(response['Payload']) == 4
+    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'latest', 'multiArchOption': 'all'},
+                                   {'repositoryName': 'ubuntu', 'imageTag': 'v2.3.0', 'multiArchOption': 'all'},
+                                   {'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0', 'multiArchOption': 'all'},
+                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest', 'multiArchOption': 'all'}]
diff --git a/source/constructs/lambda/plugin/ecr/ecr_helper/util/ecr_helper.py b/source/constructs/lambda/plugin/ecr/ecr_helper/util/ecr_helper.py
new file mode 100644
index 0000000..bfdd4a6
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/ecr_helper/util/ecr_helper.py
@@ -0,0 +1,236 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import os
+import json
+import logging
+import re
+
+import base64
+import boto3
+from botocore import config
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8003")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+default_region = os.environ.get("AWS_REGION")
+
+secretsmanager_client = boto3.client('secretsmanager', config=default_config)
+ssm_client = boto3.client('ssm', config=default_config)
+
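+# A note on the two helper classes below: BaseHelper resolves the
+# selected-image list from an SSM parameter for non-ECR sources, while
+# ECRHelper also calls the (optionally cross-account) ECR API so that ALL
+# and ALL_TAGS selections can be expanded into concrete repository/tag pairs.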
+
+class BaseHelper:
+    """Base Helper Class"""
+
+    def __init__(self, params):
+        self._params = params
+        self._multi_arch_option = "all" if self._params['include_untagged'] == 'true' else "system"
+
+    def _get_ssm_parameter(self, parameter_name, decrypt=False):
+        """Get the value of an SSM parameter."""
+        response = ssm_client.get_parameter(
+            Name=parameter_name,
+            WithDecryption=decrypt
+        )
+        return response['Parameter']['Value']
+
+    def _split_selected_images(self, src_image_list):
+        """Split the list of selected images into a list of dictionaries."""
+        logger.info("input srcImageList: %s", src_image_list)
+        result = []
+        src_image_list = re.sub(r'(\r|\n|\ |\\n)', '', src_image_list)
+
+        for image in src_image_list.split(','):
+            image_name = image.split(':')[0]
+            image_tag = image.split(':')[1] if len(
+                image.split(':')) >= 2 else "latest"
+            result.append(
+                {"repositoryName": image_name, "imageTag": image_tag, "multiArchOption": self._multi_arch_option})
+        return result
+
+    def generate_repo_tag_map_list(self):
+        """Generate a list of repository and tag maps from the selected-image
+        SSM parameter (self._params['src_image_list']).
+
+        Return:
+            [
+                {
+                    "repositoryName": "ubuntu",
+                    "imageTag": "latest"
+                },
+                {
+                    "repositoryName": "ubuntu",
+                    "imageTag": "1.2.0"
+                }
+            ]
+        """
+        result = []
+
+        ssm_image_list = self._get_ssm_parameter(
+            self._params['src_image_list'], False)
+        logger.info(ssm_image_list)
+        result = self._split_selected_images(ssm_image_list)
+
+        return result
+
+
+class ECRHelper(BaseHelper):
+    """Helper Class for ECR Task"""
+
+    def __init__(self, params):
+        super().__init__(params)
+        self._params = params
+        self._ecr = self._generate_client()
+        self._multi_arch_option = "all" if self._params['include_untagged'] == 'true' else "system"
+
+    def generate_repo_tag_map_list(self):
+        """Generate a list of repository and tag maps, driven by
+        self._params['src_list'] (ALL | SELECTED).
+
+        Return:
+            [
+                {
+                    "repositoryName": "ubuntu",
+                    "imageTag": "latest"
+                },
+                {
+                    "repositoryName": "ubuntu",
+                    "imageTag": "1.2.0"
+                }
+            ]
+        """
+        result = []
+
+        if self._params['src_list'] == 'ALL':
+            # Use ECR API to get the full list of repos and tags
+
+            repo_name_list = self._get_ecr_repositories_name()
+            for repo_name in repo_name_list:
+                image_tags = self._get_ecr_image_tags(repo_name)
+                for image_tag in image_tags:
+                    result.append(
+                        {"repositoryName": repo_name, "imageTag": image_tag, "multiArchOption": self._multi_arch_option})
+
+        elif self._params['src_list'] == 'SELECTED':
+            # If an entry's tag is ALL_TAGS and the sourceType is Amazon_ECR, all tags of that repository are fetched.
+            ssm_image_list = self._get_ssm_parameter(
+                self._params['src_image_list'], False)
+            logger.info(ssm_image_list)
+            result = self._split_selected_images(ssm_image_list)
+        else:
+            logger.info("Unsupported srcList value: %s for sourceType: %s",
+                        self._params['src_list'], self._params['source_type'])
+
+        return result
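+
+    # Example (illustrative): with src_list == 'ALL' and a registry that
+    # holds "ubuntu" tagged "latest" and "v2.3.0", the method above returns
+    # one entry per tag:
+    #   [{"repositoryName": "ubuntu", "imageTag": "latest", "multiArchOption": "all"},
+    #    {"repositoryName": "ubuntu", "imageTag": "v2.3.0", "multiArchOption": "all"}]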
+
+    def _generate_client(self):
+        """Generate the ECR client."""
+
+        # Get the AK/SK if source is NOT in current AWS account
+        if self._params['src_account_id']:
+            secret_name = self._params['src_credential_name']
+            secret = None
+            decoded_binary_secret = None
+
+            response = secretsmanager_client.get_secret_value(
+                SecretId=secret_name)
+            if 'SecretString' in response:
+                secret = response['SecretString']
+            else:
+                binary_secret_data = response['SecretBinary']
+                decoded_binary_secret = base64.b64decode(binary_secret_data)
+                secret = decoded_binary_secret.decode('utf-8')
+
+            secret_dict = json.loads(secret)
+            return boto3.client('ecr',
+                                region_name=self._params['src_region'],
+                                aws_access_key_id=secret_dict['access_key_id'],
+                                aws_secret_access_key=secret_dict['secret_access_key'])
+        else:
+            return boto3.client('ecr', region_name=self._params['src_region'])
+
+    def _get_ecr_repositories_name(self):
+        """Get the list of repositories in an Amazon ECR registry."""
+        response = self._ecr.describe_repositories()
+        repos = response.get('repositories')
+        while "nextToken" in response:
+            response = self._ecr.describe_repositories(
+                nextToken=response['nextToken']
+            )
+            repos.extend(response['repositories'])
+
+        repo_name_list = [repo['repositoryName'] for repo in repos]
+        return repo_name_list
+
+    def _get_ecr_image_tags(self, repo_name):
+        """
+        Get the list of tags for a specific repository in an Amazon ECR registry.
+        For example:
+            Repo: prod_ubuntu
+            [
+                {
+                    Image Tag: 1.3, latest
+                    Digest: sha256002
+                },
+                {
+                    Image Tag: 1.2
+                    Digest: sha256001
+                }
+            ]
+        Return:
+            tags_list: [1.3, latest, 1.2]
+
+        """
+        image_tags = []
+        response = self._ecr.describe_images(
+            repositoryName=repo_name,
+            filter={
+                'tagStatus': 'TAGGED'
+            }
+        )
+        image_details = response.get('imageDetails')
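+        # describe_images pages results via nextToken, so keep fetching
+        # until every tagged image detail for the repository is collected.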
+        while "nextToken" in response:
+            response = self._ecr.describe_images(
+                repositoryName=repo_name,
+                filter={
+                    'tagStatus': 'TAGGED'
+                },
+                nextToken=response['nextToken']
+            )
+            image_details.extend(response.get('imageDetails'))
+        for image_detail in image_details:
+            image_tags.extend(image_detail.get('imageTags'))
+
+        return image_tags
+
+    def _split_selected_images(self, src_image_list):
+        """Split the list of selected images into a list of dictionaries."""
+        logger.info("input srcImageList: %s", src_image_list)
+        result = []
+        src_image_list = re.sub(r'(\r|\n|\ |\\n)', '', src_image_list)
+
+        for image in src_image_list.split(','):
+            repo_name = image.split(':')[0]
+            image_tag = image.split(':')[1] if len(
+                image.split(':')) >= 2 else "latest"
+
+            # Handle the ALL_TAGS
+            if image_tag == "ALL_TAGS":
+                tmp_image_tags = self._get_ecr_image_tags(repo_name)
+                for tmp_image_tag in tmp_image_tags:
+                    result.append({"repositoryName": repo_name,
+                                   "imageTag": tmp_image_tag,
+                                   "multiArchOption": self._multi_arch_option})
+            else:
+                result.append(
+                    {"repositoryName": repo_name, "imageTag": image_tag, "multiArchOption": self._multi_arch_option})
+
+        return result
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/.coveragerc b/source/constructs/lambda/plugin/ecr/sfn_helper/.coveragerc
new file mode 100644
index 0000000..bacf8eb
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/sfn_helper/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+omit =
+    tests/*
+    .venv-*/*
+    test/*
+    */__init__.py
+source =
+    .
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/lambda_function.py b/source/constructs/lambda/plugin/ecr/sfn_helper/lambda_function.py
new file mode 100644
index 0000000..e456e12
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/sfn_helper/lambda_function.py
@@ -0,0 +1,52 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import os
+import logging
+import boto3
+from botocore import config
+
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8003")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+default_region = os.environ.get("AWS_REGION")
+
+
+sfn = boto3.client('stepfunctions', config=default_config)
+
+
+def lambda_handler(event, _):
+    """
+    This lambda should be triggered when the stack is created or updated.
+    Its purpose is to ensure the replication task starts immediately (the
+    Step Functions state machine is triggered) on create or update.
+    Ideally, the state machine would be triggered by an EventBridge rule,
+    but that does not work reliably in all cases, so this Lambda is a
+    workaround to resolve the issue.
+    """
+    state_machine_arn = os.environ.get('STATE_MACHINE_ARN', 'null')
+
+    query_params = {
+        'stateMachineArn': state_machine_arn,
+        'statusFilter': 'RUNNING'
+    }
+
+    exec_params = {
+        'stateMachineArn': state_machine_arn
+    }
+
+    # Check if there are any running executions
+    list_result = sfn.list_executions(**query_params)
+    executions = list_result.get('executions', [])
+    logger.info(executions)
+
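+    # Idempotency guard: start a new execution only when none is RUNNING, so
+    # repeated stack creates/updates do not pile up duplicate replication runs.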
+    if not executions:
+        # if not, start a new one
+        execution_result = sfn.start_execution(**exec_params)
+        execution_arn = execution_result['executionArn']
+        logger.info(execution_arn)
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/test/__init__.py b/source/constructs/lambda/plugin/ecr/sfn_helper/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/test/conftest.py b/source/constructs/lambda/plugin/ecr/sfn_helper/test/conftest.py
new file mode 100644
index 0000000..fc89804
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/sfn_helper/test/conftest.py
@@ -0,0 +1,16 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/test/requirements-test.txt b/source/constructs/lambda/plugin/ecr/sfn_helper/test/requirements-test.txt
new file mode 100644
index 0000000..99cbb53
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/sfn_helper/test/requirements-test.txt
@@ -0,0 +1,4 @@
+moto==3.1.18
+pytest==7.1.2
+pytest-cov==3.0.0
+pyyaml
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/ecr/sfn_helper/test/test_lambda_function.py b/source/constructs/lambda/plugin/ecr/sfn_helper/test/test_lambda_function.py
new file mode 100644
index 0000000..b885106
--- /dev/null
+++ b/source/constructs/lambda/plugin/ecr/sfn_helper/test/test_lambda_function.py
@@ -0,0 +1,33 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import os
+import boto3
+from moto import mock_stepfunctions
+
+
+@pytest.fixture
+def sfn_client():
+    with mock_stepfunctions():
+        region = os.environ.get("AWS_REGION")
+        client = boto3.client('stepfunctions', region_name=region)
+
+        # Set up mock Step Functions server and create a state machine
+        state_machine_name = 'test-state-machine'
+        response = client.create_state_machine(
+            name=state_machine_name,
+            definition='{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:123456789012:function:HelloWorld", "End": true}}}',
+            roleArn='arn:aws:iam::123456789012:role/service-role/MyRole'
+        )
+        os.environ["STATE_MACHINE_ARN"] = response['stateMachineArn']
+        yield
+
+
+def test_lambda_function(sfn_client):
+    import lambda_function
+
+    lambda_function.lambda_handler(
+        {},
+        None,
+    )
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/.coveragerc b/source/constructs/lambda/plugin/s3/asg-helper/.coveragerc
new file mode 100644
index 0000000..fed54a5
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/asg-helper/.coveragerc
@@ -0,0 +1,9 @@
+[run]
+omit =
+    tests/*
+    .venv-*/*
+    test/*
+    */__init__.py
+    assets/*
+source =
+    .
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/lambda_function.py b/source/constructs/lambda/plugin/s3/asg-helper/lambda_function.py
new file mode 100644
index 0000000..4a0554f
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/asg-helper/lambda_function.py
@@ -0,0 +1,60 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+""" Lambda to control the Auto Scaling group """
+import boto3
+import os
+import logging
+
+from botocore import config
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8002")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+
+default_region = os.environ.get("AWS_REGION")
+asg_name = os.environ.get("ASG_NAME")
+
+asg = boto3.client("autoscaling", config=default_config)
+
+
+def lambda_handler(event, context):
+    # Check the current finder job status
+    desired_capacity = 0
+    try:
+        resp = asg.describe_auto_scaling_groups(
+            AutoScalingGroupNames=[
+                asg_name,
+            ],
+        )
+        desired_capacity = resp["AutoScalingGroups"][0]["DesiredCapacity"]
+    except Exception as e:
+        logger.info("Failed to get the Auto Scaling group, please check the error log.")
+        logger.error(e)
+        raise e
+
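+    # A desired capacity of 1 means a Finder instance is already running; the
+    # Finder is treated as a singleton, so the ASG is only ever scaled 0 -> 1.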
+
+""" Lambda to control the auto scaling group """
+import boto3
+import os
+import logging
+
+from botocore import config
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8002")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+
+default_region = os.environ.get("AWS_REGION")
+asg_name = os.environ.get("ASG_NAME")
+
+asg = boto3.client("autoscaling", config=default_config)
+
+
+def lambda_handler(event, context):
+    # Check the current Finder job status
+    desired_capacity = 0
+    try:
+        resp = asg.describe_auto_scaling_groups(
+            AutoScalingGroupNames=[
+                asg_name,
+            ],
+        )
+        desired_capacity = resp["AutoScalingGroups"][0]["DesiredCapacity"]
+    except Exception as e:
+        logger.error("Failed to get the auto scaling group, please check the error log.")
+        logger.error(e)
+        raise e
+
+    # Update the auto scaling group
+    if desired_capacity == 1:
+        logger.info("A Finder job is already running, skip.")
+    else:
+        try:
+            asg.update_auto_scaling_group(
+                AutoScalingGroupName=asg_name,
+                DesiredCapacity=1,
+            )
+            logger.info("Launched a new Finder instance.")
+        except Exception as e:
+            logger.error("Failed to launch a Finder job, please check the error log.")
+            logger.error(e)
+            raise e
+
+    return 'OK'
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/test/__init__.py b/source/constructs/lambda/plugin/s3/asg-helper/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/test/conftest.py b/source/constructs/lambda/plugin/s3/asg-helper/test/conftest.py
new file mode 100644
index 0000000..6aeae13
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/asg-helper/test/conftest.py
@@ -0,0 +1,19 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["LAUNCH_TEMPLATE_NAME"] = "dth-finder-launch-template"
+    os.environ["ASG_NAME"] = "dth-finder-asg"
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/test/requirements-test.txt b/source/constructs/lambda/plugin/s3/asg-helper/test/requirements-test.txt
new file mode 100644
index 0000000..f95d46d
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/asg-helper/test/requirements-test.txt
@@ -0,0 +1,3 @@
+moto
+pytest
+pytest-cov
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/asg-helper/test/test_lambda_function.py b/source/constructs/lambda/plugin/s3/asg-helper/test/test_lambda_function.py
new file mode 100644
index 0000000..f2fe10c
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/asg-helper/test/test_lambda_function.py
@@ -0,0 +1,82 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import os
+import boto3
+from moto import mock_autoscaling, mock_ec2
+
+
+@pytest.fixture
+def auto_scaling_client():
+    with mock_autoscaling():
+        region = os.environ.get("AWS_REGION")
+        launch_template_name = os.environ.get("LAUNCH_TEMPLATE_NAME")
+        asg_name = os.environ.get("ASG_NAME")
+        client = boto3.client("autoscaling", region_name=region)
+        client.create_auto_scaling_group(
+            AutoScalingGroupName=asg_name,
+            DesiredCapacity=0,
+            MinSize=0,
+            MaxSize=1,
+            LaunchTemplate={
+                "LaunchTemplateName": launch_template_name,
+                "Version": "$Latest",
+            },
+            AvailabilityZones=["us-east-1a"],
+        )
+        yield
+
+
+@pytest.fixture
+def auto_scaling_client_2():
+    with mock_autoscaling():
+        region = os.environ.get("AWS_REGION")
+        launch_template_name = os.environ.get("LAUNCH_TEMPLATE_NAME")
+        asg_name = os.environ.get("ASG_NAME")
+        client = boto3.client("autoscaling", region_name=region)
+        # Create an ASG whose name does NOT match the ASG_NAME environment
+        # variable, so the handler cannot find its target group
+        client.create_auto_scaling_group(
+            AutoScalingGroupName="no_exist",
+            DesiredCapacity=0,
+            MinSize=0,
+            MaxSize=1,
+            LaunchTemplate={
+                "LaunchTemplateName": launch_template_name,
+                "Version": "$Latest",
+            },
+            AvailabilityZones=["us-east-1a"],
+        )
+        yield
+
+
+@pytest.fixture
+def ec2_client():
+    with mock_ec2():
+        region = os.environ.get("AWS_REGION")
+        launch_template_name = os.environ.get("LAUNCH_TEMPLATE_NAME")
+        client = boto3.client("ec2", region_name=region)
+        client.create_launch_template(
+            LaunchTemplateName=launch_template_name,
+            LaunchTemplateData={"ImageId": "ami-12c6146b", "InstanceType": "t2.medium"},
+        )
+        yield
+
+
+def test_lambda_function(ec2_client, auto_scaling_client):
+    import lambda_function
+
+    # The ASG exists with DesiredCapacity 0, so the handler scales it to 1
+    result = lambda_function.lambda_handler(None, None)
+    # Expect the handler to execute successfully
+    assert result == 'OK'
+
+
+def test_lambda_function_2(ec2_client, auto_scaling_client_2):
+    import lambda_function
+
+    # The ASG named in ASG_NAME does not exist in this fixture,
+    # so the handler is expected to raise
+    with pytest.raises(Exception):
+        lambda_function.lambda_handler(None, None)
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/.coveragerc b/source/constructs/lambda/plugin/s3/custom-resource/.coveragerc
new file mode 100644
index 0000000..bacf8eb
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/custom-resource/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+omit =
+    tests/*
+    .venv-*/*
+    test/*
+    */__init__.py
+source =
+    .
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/lambda_function.py b/source/constructs/lambda/plugin/s3/custom-resource/lambda_function.py
new file mode 100644
index 0000000..2c45e7e
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/custom-resource/lambda_function.py
@@ -0,0 +1,139 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
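+#
+# Custom resource handler that wires an S3 bucket's event notifications to
+# the transfer task's SQS queue. On Create/Update it appends a queue
+# configuration (scoped to OBJECT_PREFIX) to the bucket's existing
+# notification settings; on Delete it removes only the configuration
+# identified by this stack's notification id, leaving any other
+# configurations on the bucket untouched.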
+
+
+import json
+import boto3
+import os
+from botocore import config
+import logging
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+stack_name = os.environ["STACK_NAME"]
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8002")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+
+default_region = os.environ.get("AWS_REGION")
+
+bucket_name = os.environ.get("BUCKET_NAME", "")
+object_prefix = os.environ.get("OBJECT_PREFIX", "")
+
+event_queue_name = os.environ.get("EVENT_QUEUE_NAME", "")
+event_queue_arn = os.environ.get("EVENT_QUEUE_ARN", "")
+
+event_action = os.environ.get("EVENT_ACTION", "")
+
+notification_id = f"{stack_name}-{event_queue_name}"
+
+
+def lambda_handler(event, context):
+    request_type = event["RequestType"]
+    if request_type in ("Create", "Update"):
+        return on_create()
+    if request_type == "Delete":
+        return on_delete()
+    raise Exception("Invalid request type: %s" % request_type)
+
+
+def on_create():
+    config_events = []
+    if event_action == "CreateAndDelete":
+        config_events = ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
+    elif event_action == "Create":
+        config_events = ["s3:ObjectCreated:*"]
+    else:
+        return {
+            "statusCode": 200,
+            "body": json.dumps("Skip creating s3 events."),
+        }
+
+    try:
+        s3 = boto3.client("s3")
+        history_config = s3.get_bucket_notification_configuration(
+            Bucket=bucket_name,
+        )
+        logger.info(f"history notification config is {history_config}")
+
+        queue_configurations = history_config.get("QueueConfigurations", [])
+        queue_configurations.append(
+            {
+                "Id": notification_id,
+                "QueueArn": event_queue_arn,
+                "Events": config_events,
+                "Filter": {
+                    "Key": {"FilterRules": [{"Name": "prefix", "Value": object_prefix}]}
+                },
+            }
+        )
+
+        notification_config = {
+            "QueueConfigurations": queue_configurations,
+            "TopicConfigurations": history_config.get("TopicConfigurations", []),
+            "LambdaFunctionConfigurations": history_config.get(
+                "LambdaFunctionConfigurations", []
+            ),
+        }
+        if "EventBridgeConfiguration" in history_config:
+            notification_config["EventBridgeConfiguration"] = history_config[
+                "EventBridgeConfiguration"
+            ]
+        resp = s3.put_bucket_notification_configuration(
+            Bucket=bucket_name,
+            NotificationConfiguration=notification_config,
+        )
+        logger.info(f"put_bucket_notification_configuration resp is {resp}")
+    except Exception as err:
+        logger.error("Failed to create the S3 bucket notification, %s" % err)
+        raise
+
+    return {
+        "statusCode": 200,
+        "body": json.dumps("Create S3 bucket notification success!"),
+    }
+
+
+def on_delete():
+    if event_action in ["CreateAndDelete", "Create"]:
+        try:
+            s3 = boto3.client("s3")
+            history_config = s3.get_bucket_notification_configuration(
+                Bucket=bucket_name,
+            )
+            logger.info(f"history notification config is {history_config}")
+            queue_configurations = history_config.get("QueueConfigurations", [])
+            deleted_queue_configurations = [
+                x for x in queue_configurations if x["Id"] != notification_id
+            ]
+
+            notification_config = {
+                "QueueConfigurations": deleted_queue_configurations,
+                "TopicConfigurations": history_config.get("TopicConfigurations", []),
+                "LambdaFunctionConfigurations": history_config.get(
+                    "LambdaFunctionConfigurations", []
+                ),
+            }
+            if "EventBridgeConfiguration" in history_config:
+                notification_config["EventBridgeConfiguration"] = history_config[
+                    "EventBridgeConfiguration"
+                ]
+
+            resp = s3.put_bucket_notification_configuration(
+                Bucket=bucket_name,
+                NotificationConfiguration=notification_config,
+            )
+            logger.info(f"put_bucket_notification_configuration resp is {resp}")
+        except Exception as err:
+            logger.error("Failed to delete the S3 bucket notification, %s" % err)
+            raise
+
+    return {
+        "statusCode": 200,
+        "body": json.dumps("Delete S3 bucket notification success!"),
+    }
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/test/__init__.py b/source/constructs/lambda/plugin/s3/custom-resource/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/test/conftest.py b/source/constructs/lambda/plugin/s3/custom-resource/test/conftest.py
new file mode 100644
index 0000000..6977bdc
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/custom-resource/test/conftest.py
@@ -0,0 +1,29 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["SOLUTION_VERSION"] = "v1.0.0"
+    os.environ["SOLUTION_ID"] = "SO8002"
+
+    os.environ["STACK_NAME"] = "test"
+
+    os.environ["BUCKET_NAME"] = "test-bucket"
+    os.environ["OBJECT_PREFIX"] = "test"
+
+    os.environ["EVENT_QUEUE_ARN"] = "arn:aws:sqs:us-east-1:123456789012:test-queue"
+    os.environ[
+        "LOG_EVENT_QUEUE_URL"
+    ] = "https://sqs.us-east-1.amazonaws.com/123456789012/test-queue"
+    os.environ["EVENT_QUEUE_NAME"] = "test-queue"
+
+    os.environ["EVENT_ACTION"] = "CreateAndDelete"
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/test/requirements-test.txt b/source/constructs/lambda/plugin/s3/custom-resource/test/requirements-test.txt
new file mode 100644
index 0000000..5e06ff7
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/custom-resource/test/requirements-test.txt
@@ -0,0 +1,4 @@
+boto3
+moto
+pytest
+pytest-cov
diff --git a/source/constructs/lambda/plugin/s3/custom-resource/test/test_custom_resource.py b/source/constructs/lambda/plugin/s3/custom-resource/test/test_custom_resource.py
new file mode 100644
index 0000000..ba1f95e
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/custom-resource/test/test_custom_resource.py
@@ -0,0 +1,39 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
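+#
+# These tests drive the Create and Delete paths of the custom resource
+# against a moto-mocked bucket; the bucket name, queue ARN, and event
+# action come from the autouse fixture in conftest.py.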
+
+from moto import mock_s3
+import pytest
+import os
+import boto3
+
+
+@pytest.fixture
+def s3_client():
+    bucket_name = os.environ.get("BUCKET_NAME")
+    with mock_s3():
+        s3 = boto3.resource("s3", region_name="us-east-1")
+        # Create the bucket
+        s3.create_bucket(Bucket=bucket_name)
+        yield
+
+
+def test_lambda_handler_on_create(s3_client):
+    from lambda_function import lambda_handler
+
+    assert lambda_handler(
+        {
+            "RequestType": "Create",
+        },
+        None,
+    )
+
+
+def test_lambda_handler_on_delete(s3_client):
+    from lambda_function import lambda_handler
+
+    assert lambda_handler(
+        {
+            "RequestType": "Delete",
+        },
+        None,
+    )
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/.coveragerc b/source/constructs/lambda/plugin/s3/multi-part-controller/.coveragerc
new file mode 100644
index 0000000..fed54a5
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/.coveragerc
@@ -0,0 +1,9 @@
+[run]
+omit =
+    tests/*
+    .venv-*/*
+    test/*
+    */__init__.py
+    assets/*
+source =
+    .
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/check_multi_part_upload_status.py b/source/constructs/lambda/plugin/s3/multi-part-controller/check_multi_part_upload_status.py
new file mode 100644
index 0000000..0f508c5
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/check_multi_part_upload_status.py
@@ -0,0 +1,75 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+from util.multi_part_helper import MultiPartUploadHelper
+
+import os
+import logging
+
+import boto3
+from botocore import config
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8002")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+default_config = config.Config(**user_agent_config)
+default_region = os.environ.get("AWS_REGION")
+split_part_table_name = os.environ.get("SPLIT_PART_TABLE_NAME")
+destination_bucket_name = os.environ.get("DESTINATION_BUCKET_NAME")
+
+dynamodb = boto3.resource('dynamodb', config=default_config)
+s3 = boto3.client('s3', config=default_config)
+
+split_part_table = dynamodb.Table(split_part_table_name)
+
+
+def lambda_handler(event, _):
+    """Check whether all parts of the given multipart upload are transferred."""
+
+    args = event["arguments"]
+    upload_id = args.get("uploadID")
+    total_parts_count = int(args.get("totalPartsCount"))
+    object_key = args.get('objectKey')
+
+    multi_part_helper = MultiPartUploadHelper(
+        upload_id=upload_id, object_key=object_key)
+
+    # Query all parts under the given upload id;
+    # if all parts are transferred, call complete multipart upload
+    # and send the transfer complete message to the worker log group
+    all_parts = multi_part_helper.query_all_parts_under_upload_id()
+
+    if len(all_parts) == total_parts_count:
+        transfer_result = multi_part_helper.check_all_parts_transferred(all_parts)
+        if transfer_result == "COMPLETED":
+            logger.info(
+                "All parts transferred successfully. Starting merge...")
+        elif transfer_result == "NOT_COMPLETED":
+            logger.info(
+                "Some parts are still being transferred, please wait a moment...")
+        else:
+            logger.info(
                "One or more parts failed to transfer more than 5 times. Abort this upload id and send the transfer failed message to the worker log group")
+        status = transfer_result
+    else:
+        logger.info(
+            f"Currently the recorded parts count is {len(all_parts)}, but the total parts count is {total_parts_count}. Cancel merge!")
+        status = "NOT_COMPLETED"
+
+    return {
+        "status": status,
+        "arguments": {
+            "uploadID": upload_id,
+            "totalPartsCount": total_parts_count,
+            "objectKey": object_key,
+        }
+    }
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/multi_part_upload_result.py b/source/constructs/lambda/plugin/s3/multi-part-controller/multi_part_upload_result.py
new file mode 100644
index 0000000..655c042
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/multi_part_upload_result.py
@@ -0,0 +1,35 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+from util.multi_part_helper import MultiPartUploadHelper
+
+
+def lambda_handler(event, _):
+    """ This Lambda is used to manage the lifecycle of multi-part uploads in cluster mode. """
+
+    args = event["arguments"]
+    status = event["status"]
+    upload_id = args.get("uploadID")
+    object_key = args.get('objectKey')
+
+    multi_part_helper = MultiPartUploadHelper(
+        upload_id=upload_id, object_key=object_key)
+    etag = None
+    if status == "COMPLETED":
+        etag = multi_part_helper.complete_multipart_upload()
+        if etag is None:
+            status = "ERROR"
+    elif status == "ERROR":
+        multi_part_helper.abort_multipart_upload()
+
+    # Update the transfer ddb table
+    multi_part_helper.update_transfer_table_status(status, etag)
+
+    # Send the transfer result message to the worker cloudwatch log group
+    multi_part_helper.send_transfer_result_message_to_cloudwatch_log_group(
+        status)
+
+    return {
+        "status": status,
+        "etag": etag
+    }
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/test/__init__.py b/source/constructs/lambda/plugin/s3/multi-part-controller/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/test/conftest.py b/source/constructs/lambda/plugin/s3/multi-part-controller/test/conftest.py
new file mode 100644
index 0000000..7fdf5e3
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/test/conftest.py
@@ -0,0 +1,24 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def default_environment_variables():
+    """Mocked AWS environment variables such as AWS credentials and region"""
+    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
+    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
+
+    os.environ["DESTINATION_PREFIX"] = "v2-5-0-dev-0901-cn-01"
+    os.environ["OBJECT_TRANSFER_TABLE_NAME"] = "DataTransferS3Stack-S3TransferTable-LZ7KNIP1A6H3"
+    os.environ["SOLUTION_VERSION"] = "v1.0.0"
+    os.environ["SPLIT_PART_TABLE_NAME"] = "DataTransferS3Stack-S3SplitPartTable-151O82S2IB63R"
+    os.environ["STACK_NAME"] = "DataTransferS3Stack"
+    os.environ["WORKER_LOG_GROUP_NAME"] = "DataTransferS3Stack-CommonS3RepWorkerLogGroupE38567D7-p0aHZkWsx0xk"
+    os.environ["DESTINATION_BUCKET_NAME"] = "dth-recive-cn-north-1"
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/test/requirements-test.txt b/source/constructs/lambda/plugin/s3/multi-part-controller/test/requirements-test.txt
new file mode 100644
index 0000000..f95d46d
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/test/requirements-test.txt
@@ -0,0 +1,3 @@
+moto
+pytest
+pytest-cov
\ No newline at end of file
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_check_multi_part_upload_status.py b/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_check_multi_part_upload_status.py
new file mode 100644
index 0000000..5668baf
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_check_multi_part_upload_status.py
@@ -0,0 +1,140 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
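+#
+# Test data layout: part_info_1..3 are three finished parts that belong to
+# the same upload id (a complete 3-part upload), while part_info_4 belongs
+# to a different upload id, leaving that upload with only 1 of its 3 parts.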
+
+import pytest
+import os
+import boto3
+from moto import mock_dynamodb
+
+
+part_info_1 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 1,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634047",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+part_info_2 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 2,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634046",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+part_info_3 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 3,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634046",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+part_info_4 = {
+    "UploadId": "fake.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm",
+    "PartNumber": 1,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634046",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+
+@pytest.fixture
+def ddb_client():
+    with mock_dynamodb():
+        region = os.environ.get("AWS_REGION")
+        ddb = boto3.resource("dynamodb", region_name=region)
+        # Mock the split part table
+        task_table_name = os.environ.get("SPLIT_PART_TABLE_NAME")
+        split_part_table = ddb.create_table(
+            TableName=task_table_name,
+            KeySchema=[
+                {
+                    "AttributeName": "UploadId",
+                    "KeyType": "HASH"
+                },
+                {
+                    "AttributeName": "PartNumber",
+                    "KeyType": "RANGE"  # This specifies the SortKey
+                }
+            ],
+            AttributeDefinitions=[
+                {
+                    "AttributeName": "UploadId",
+                    "AttributeType": "S"
+                },
+                {
+                    "AttributeName": "PartNumber",
+                    "AttributeType": "N"  # PartNumber is a number
+                }
+            ],
+            ProvisionedThroughput={
+                "ReadCapacityUnits": 10,
+                "WriteCapacityUnits": 10
+            },
+        )
+        data_list = [part_info_1, part_info_2, part_info_3, part_info_4]
+        with split_part_table.batch_writer() as batch:
+            for data in data_list:
+                batch.put_item(Item=data)
+        yield
+
+
+def test_lambda_function(ddb_client):
+    """test lambda function"""
+    import check_multi_part_upload_status
+
+    result = check_multi_part_upload_status.lambda_handler(
+        {
+            "status": "NOT_COMPLETED",
+            "arguments": {
+                "uploadID": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+                "totalPartsCount": 3,
+                "objectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin"
+            }
+        },
+        None,
+    )
+    assert result["status"] == "COMPLETED"
+
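+    # Second scenario: the "fake." upload id has only 1 of its 3 parts
+    # recorded in the split part table, so the handler reports
+    # NOT_COMPLETED and the merge step is skipped.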
+    result = check_multi_part_upload_status.lambda_handler(
+        {
+            "status": "NOT_COMPLETED",
+            "arguments": {
+                "uploadID": "fake.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm",
+                "totalPartsCount": 3,
+                "objectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin"
+            }
+        },
+        None,
+    )
+    assert result["status"] == "NOT_COMPLETED"
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_multi_part_upload_result.py b/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_multi_part_upload_result.py
new file mode 100644
index 0000000..1dc9400
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/test/test_multi_part_upload_result.py
@@ -0,0 +1,178 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import os
+import boto3
+from moto import mock_dynamodb, mock_s3, mock_logs
+
+
+part_info_1 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 1,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634047",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+part_info_2 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 2,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634046",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+part_info_3 = {
+    "UploadId": "PpxsD1E.Hr0VNChyhRk_syuZCzFegskaF8Ag57DSyTJYvki57wTdj50yIQGtKm.TAJ_67Q2a1GIDnvMVZA6c3FhFDcegFeyHc95tDlwPTf6.z.L2VoBM.C",
+    "PartNumber": 3,
+    "EndTime": "2023/09/01 02:24:04",
+    "EndTimestamp": 1693535044,
+    "Etag": "2798abd1fe0a963f01054d2525634046",
+    "JobStatus": "PART_DONE",
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "RetryCount": 0,
+    "SpentTime": 3,
+    "StartTime": "2023/09/01 02:24:01",
+    "StartTimestamp": 1693535041,
+    "TotalPartsCount": 3
+}
+
+item_info_1 = {
+    "ObjectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin",
+    "EndTime": "2023/09/01 02:25:05",
+    "EndTimestamp": 1693535105,
+    "Etag": "",
+    "JobStatus": "SPLIT_DONE",
+    "Sequencer": "",
+    "Size": 117576402565,
+    "SpentTime": 66,
+    "StartTime": "2023/09/01 02:23:59",
+    "StartTimestamp": 1693535039
+}
+
+
+@pytest.fixture
+def ddb_client():
+    with mock_dynamodb():
+        region = os.environ.get("AWS_REGION")
+        ddb = boto3.resource("dynamodb", region_name=region)
+        # Mock the split part table
+        task_table_name = os.environ.get("SPLIT_PART_TABLE_NAME")
+        split_part_table = ddb.create_table(
+            TableName=task_table_name,
+            KeySchema=[
+                {
+                    "AttributeName": "UploadId",
+                    "KeyType": "HASH"
+                },
+                {
+                    "AttributeName": "PartNumber",
+                    "KeyType": "RANGE"  # This specifies the SortKey
+                }
+            ],
+            AttributeDefinitions=[
+                {
+                    "AttributeName": "UploadId",
+                    "AttributeType": "S"
+                },
+                {
+                    "AttributeName": "PartNumber",
+                    "AttributeType": "N"  # PartNumber is a number
+                }
+            ],
+            ProvisionedThroughput={
+                "ReadCapacityUnits": 10,
+                "WriteCapacityUnits": 10
+            },
+        )
+
+        data_list = [part_info_1, part_info_2, part_info_3]
+        with split_part_table.batch_writer() as batch:
+            for data in data_list:
+                batch.put_item(Item=data)
+
+        object_table_name = os.environ.get("OBJECT_TRANSFER_TABLE_NAME")
+        object_transfer_table = ddb.create_table(
+            TableName=object_table_name,
+            KeySchema=[
+                {
+                    "AttributeName": "ObjectKey",
+                    "KeyType": "HASH"
+                }
+            ],
+            AttributeDefinitions=[
+                {
+                    "AttributeName": "ObjectKey",
+                    "AttributeType": "S"
+                }
+            ],
+            ProvisionedThroughput={
+                "ReadCapacityUnits": 10,
+                "WriteCapacityUnits": 10
+            },
+        )
+        data_list = [item_info_1]
+        with object_transfer_table.batch_writer() as batch:
+            for data in data_list:
+                batch.put_item(Item=data)
+        yield
+
+
+@pytest.fixture
+def s3_client():
+    with mock_s3():
+        region = os.environ.get("AWS_REGION")
+        s3 = boto3.client('s3', region_name=region)
+
+        bucket_name = os.environ.get("DESTINATION_BUCKET_NAME")
+        object_key = "v2-5-0-dev-0822/ami-055bae28e98705972.bin"
+        s3.create_bucket(Bucket=bucket_name)
+
+        response = s3.create_multipart_upload(
+            Bucket=bucket_name, Key=object_key)
+        os.environ["MOCK_UPLOAD_ID"] = response['UploadId']
+        # Keep the mock alive for the duration of the test
+        yield
+
+
+@pytest.fixture
+def log_client():
+    with mock_logs():
+        region = os.environ.get("AWS_REGION")
+        logs_client = boto3.client('logs', region_name=region)
+
+        worker_log_group_name = os.environ.get("WORKER_LOG_GROUP_NAME")
+
+        logs_client.create_log_group(logGroupName=worker_log_group_name)
+        # Keep the mock alive for the duration of the test
+        yield
+
+
+def test_lambda_function(ddb_client, s3_client, log_client):
+    """test lambda function"""
+    import multi_part_upload_result
+
+    result = multi_part_upload_result.lambda_handler(
+        {
+            "status": "COMPLETED",
+            "arguments": {
+                "uploadID": os.environ["MOCK_UPLOAD_ID"],
+                "totalPartsCount": 3,
+                "objectKey": "v2-5-0-dev-0822/ami-055bae28e98705972.bin"
+            }
+        },
+        None,
+    )
+    # The recorded part ETags do not match any uploaded parts, so the
+    # complete call fails, the upload is aborted, and the status is ERROR
+    assert result["status"] == "ERROR"
diff --git a/source/constructs/lambda/plugin/s3/multi-part-controller/util/multi_part_helper.py b/source/constructs/lambda/plugin/s3/multi-part-controller/util/multi_part_helper.py
new file mode 100644
index 0000000..9f3e8ea
--- /dev/null
+++ b/source/constructs/lambda/plugin/s3/multi-part-controller/util/multi_part_helper.py
@@ -0,0 +1,242 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
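+#
+# Shared helper used by both multi-part controller Lambdas: it reads part
+# records from the split part DynamoDB table, completes or aborts the S3
+# multipart upload, records the final status in the transfer table, and
+# writes a result line to the worker CloudWatch log group.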
+import os
+import json
+import logging
+import boto3
+import time
+import datetime
+from botocore import config
+from boto3.dynamodb.conditions import Key
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0")
+solution_id = os.environ.get("SOLUTION_ID", "SO8002")
+user_agent_config = {
+    "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}"
+}
+
+default_config = config.Config(**user_agent_config)
+default_region = os.environ.get("AWS_REGION")
+split_part_table_name = os.environ.get("SPLIT_PART_TABLE_NAME")
+object_transfer_table_name = os.environ.get("OBJECT_TRANSFER_TABLE_NAME")
+destination_bucket_name = os.environ.get("DESTINATION_BUCKET_NAME")
+destination_prefix = os.environ.get("DESTINATION_PREFIX")
+worker_log_group_name = os.environ.get("WORKER_LOG_GROUP_NAME")
+dest_credentials_name = os.environ.get("DEST_CREDENTIALS")
+dest_region = os.environ.get("DEST_REGION")
+WORKER_LOG_STREAM_NAME = "giant-object-log-stream"
+
+MAX_RETRY_COUNT = 5
+
+ddb_resource = boto3.resource('dynamodb', config=default_config)
+cloudwatch_logs = boto3.client('logs', config=default_config)
+ddb_client = boto3.client('dynamodb', config=default_config)
+secrets_manager = boto3.client('secretsmanager', config=default_config)
+
+split_part_table = ddb_resource.Table(split_part_table_name)
+
+# Guard against an unset DESTINATION_PREFIX before stripping a trailing slash
+if destination_prefix and destination_prefix.endswith('/'):
+    destination_prefix = destination_prefix[:-1]
+
+
+class MultiPartUploadHelper:
+    """Base class for the multi-part upload helper"""
+
+    def __init__(self, upload_id: str, object_key: str):
+        self.upload_id = upload_id
+        self.dest_object_key = f"{destination_prefix}/{object_key}" if destination_prefix else object_key
+        self.object_key = object_key
+        self.s3_client = self.create_s3_client()
+
+    def create_s3_client(self):
+        """ Create an S3 boto3 client, using stored credentials if configured """
+        if dest_credentials_name:
+            try:
+                # Retrieve the stored AK/SK from Secrets Manager
+                response = secrets_manager.get_secret_value(
+                    SecretId=dest_credentials_name)
+                secret_data = response['SecretString']
+                secret_dict = json.loads(secret_data)
+
+                # Create an S3 boto3 client using the retrieved credentials
+                s3_client = boto3.client('s3',
+                                         aws_access_key_id=secret_dict['access_key_id'],
+                                         aws_secret_access_key=secret_dict['secret_access_key'],
+                                         region_name=dest_region,
+                                         config=default_config
+                                         )
+
+                return s3_client
+            except Exception as err:
+                logger.error("Error retrieving or using stored credentials:")
+                logger.error(err)
+                return None
+        else:
+            # If no credentials are configured, create a regular S3 boto3 client
+            s3_client = boto3.client('s3', config=default_config)
+            return s3_client
+
+    def query_all_parts_under_upload_id(self):
+        """ Query all parts under the given upload id """
+        all_items = []
+
+        last_evaluated_key = None
+        while True:
+            if last_evaluated_key:
+                response = split_part_table.query(
+                    KeyConditionExpression=Key("UploadId").eq(self.upload_id),
+                    ExclusiveStartKey=last_evaluated_key
+                )
+            else:
+                response = split_part_table.query(
+                    KeyConditionExpression=Key("UploadId").eq(self.upload_id),
+                )
+
+            all_items.extend(response.get('Items', []))
+
+            last_evaluated_key = response.get('LastEvaluatedKey')
+            if not last_evaluated_key:
+                break
+
+        return all_items
+
+    def check_all_parts_transferred(self, all_parts: list):
+        """ Query all parts under the given upload id and check if all are transferred """
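+        # Classification (each part record carries a JobStatus and RetryCount,
+        # e.g. {"JobStatus": "PART_DONE", "RetryCount": 0}):
+        #   COMPLETED     - every part is PART_DONE
+        #   ERROR         - any part is PART_ERROR after MAX_RETRY_COUNT retries
+        #   NOT_COMPLETED - anything else (parts still in flight)
+        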
all_completed = all(part.get('JobStatus', {}) == + 'PART_DONE' for part in all_parts) + any_failed_retry = any(part.get('JobStatus', {}) == 'PART_ERROR' and part.get( + 'RetryCount', 0) >= MAX_RETRY_COUNT for part in all_parts) + + if all_completed: + return 'COMPLETED' + elif any_failed_retry: + return 'ERROR' + else: + return 'NOT_COMPLETED' + + def abort_multipart_upload(self): + """ Abort the S3 multipart upload with the given upload id """ + self.s3_client.abort_multipart_upload( + Bucket=destination_bucket_name, + Key=self.dest_object_key, + UploadId=self.upload_id + ) + + def complete_multipart_upload(self): + """ Complete the S3 multipart upload with the given upload id """ + + all_parts = self.query_all_parts_under_upload_id() + + parts_list = [] + + for part in all_parts: + parts_list.append({ + 'ETag': part['Etag'], + 'PartNumber': int(part['PartNumber']) + }) + try: + logger.info("Start complete multipart upload") + response = self.s3_client.complete_multipart_upload( + Bucket=destination_bucket_name, + Key=self.dest_object_key, + UploadId=self.upload_id, + MultipartUpload={ + 'Parts': parts_list + } + ) + logger.info(f"Complete multipart upload, etag: {response['ETag']}") + return response['ETag'] + except Exception as err: + logger.exception("Failed to complete multipart upload due to:") + logger.error(err) + logger.exception("Abort multipart uploadId: %s", self.upload_id) + self.abort_multipart_upload() + return None + + def update_transfer_table_status(self, status: str, etag: str): + """ Update the transfer ddb table """ + if status == "COMPLETED": + status = "DONE" + logger.info("Update %s object in transfer table status: %s", + self.object_key, status) + + current_time = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()) + current_timestamp = int(time.time()) + + update_expression = ( + "SET JobStatus = :status, Etag = :tg, EndTime = :et, EndTimestamp = :etm, SpentTime = :etm - StartTimestamp" + ) + + if etag is None: + etag = "" + + expression_attribute_values = { + ':status': {'S': status}, + ':tg': {'S': etag}, + ':et': {'S': current_time}, + ':etm': {'N': str(current_timestamp)}, + } + + response = ddb_client.update_item( + TableName=object_transfer_table_name, + Key={ + 'ObjectKey': {'S': self.object_key} + }, + UpdateExpression=update_expression, + ExpressionAttributeValues=expression_attribute_values + ) + + return response + + def send_transfer_result_message_to_cloudwatch_log_group(self, status: str): + """ Send the transfer result message to worker cloudwatch log group """ + logger.info("Send transfer result message to cloudwatch log group") + + current_time = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S") + + if status == "COMPLETED": + logger.info("Send transfer completed message") + status = "DONE" + log_message = f"{current_time} ----->Transferred 1 object {self.object_key} with status {status}" + self.log_to_cloudwatch(log_message) + + elif status == "ERROR": + logger.info("Send transfer failed message") + log_message = f"{current_time} ----->Transferred 1 object {self.object_key} with status {status}" + self.log_to_cloudwatch(log_message) + else: + logger.info("Unknown transfer status %s!" 
% status) + + def log_to_cloudwatch(self, message): + """ Log message to cloudwatch log group """ + if not self.log_stream_exists(WORKER_LOG_STREAM_NAME): + self.create_log_stream(WORKER_LOG_STREAM_NAME) + + cloudwatch_logs.put_log_events( + logGroupName=worker_log_group_name, + logStreamName=WORKER_LOG_STREAM_NAME, + logEvents=[ + { + 'timestamp': int(time.time() * 1000), + 'message': message + } + ] + ) + + def log_stream_exists(self, log_stream_name): + """ Check if the log stream exists """ + response = cloudwatch_logs.describe_log_streams( + logGroupName=worker_log_group_name, + logStreamNamePrefix=log_stream_name + ) + return len(response.get('logStreams', [])) > 0 + + def create_log_stream(self, log_stream_name): + """ Create the log stream """ + cloudwatch_logs.create_log_stream( + logGroupName=worker_log_group_name, + logStreamName=log_stream_name + ) diff --git a/source/constructs/lib/api-stack.ts b/source/constructs/lib/api-stack.ts index 1298807..6d6408f 100644 --- a/source/constructs/lib/api-stack.ts +++ b/source/constructs/lib/api-stack.ts @@ -50,18 +50,11 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; // Can define custom bucket to hold the plugin url. Default to aws-gcr-solutions const templateBucket = process.env.TEMPLATE_OUTPUT_BUCKET || 'aws-gcr-solutions' + const solutionName = process.env.SOLUTION_TRADEMARKEDNAME || 'data-transfer-hub' + const solutionVersion = process.env.VERSION || 'v1.0.0' - let s3PluginVersion = 'v2.4.0' - let ecrPluginVersion = 'v2.3.0' - let suffix = '-plugin' - if (templateBucket === 'aws-gcr-solutions') { - s3PluginVersion = 'v2.4.0' - ecrPluginVersion = 'v2.3.0' - suffix = '' - } - - const PLUGIN_TEMPLATE_S3EC2 = `https://${templateBucket}.s3.amazonaws.com/data-transfer-hub-s3${suffix}/${s3PluginVersion}/DataTransferS3Stack-ec2.template`; - const PLUGIN_TEMPLATE_ECR = `https://${templateBucket}.s3.amazonaws.com/data-transfer-hub-ecr${suffix}/${ecrPluginVersion}/DataTransferECRStack.template`; + const PLUGIN_TEMPLATE_S3EC2 = `https://${templateBucket}.s3.amazonaws.com/${solutionName}/${solutionVersion}/DataTransferS3Stack.template`; + const PLUGIN_TEMPLATE_ECR = `https://${templateBucket}.s3.amazonaws.com/${solutionName}/${solutionVersion}/DataTransferECRStack.template`; // This Lambda is to create the AppSync Service Linked Role const appSyncServiceLinkRoleFn = new lambda.Function(this, 'AppSyncServiceLinkRoleFn', { @@ -220,25 +213,6 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; const cfnAlarmTopic = alarmTopic.node.defaultChild as sns.CfnTopic; cfnAlarmTopic.overrideLogicalId('DTHCentralAlarmTopic') - const lambdaLayer = new lambda.LayerVersion(this, 'Layer', { - code: lambda.Code.fromAsset(path.join(__dirname, '../lambda/layer/api/'), { - bundling: { - image: lambda.Runtime.NODEJS_16_X.bundlingImage, - command: [ - 'bash', '-c', [ - `cd /asset-output/`, - `mkdir nodejs`, - `cp /asset-input/nodejs/package.json /asset-output/nodejs/`, - `cd /asset-output/nodejs/`, - `npm install` - ].join(' && ') - ], - user: 'root' - } - }), - compatibleRuntimes: [lambda.Runtime.NODEJS_16_X], - description: 'Data Transfer Hub - Lambda Layer' - }) const monitorStateMachine = new monitorSate.MonitorStateMachine(this, 'taskMonitorFlow', { taskTable: this.taskTable, @@ -248,7 +222,6 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; const stateMachine = new cfnSate.CloudFormationStateMachine(this, 'CfnWorkflow', { taskTableName: this.taskTable.tableName, taskTableArn: this.taskTable.tableArn, - lambdaLayer: lambdaLayer, 
taskMonitorSfnArn: monitorStateMachine.taskMonitorStateMachineArn }) @@ -408,13 +381,6 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; const taskDS = this.api.addDynamoDbDataSource('TaskTableDS', this.taskTable) - taskDS.createResolver('QueryListTasksResolver', { - typeName: 'Query', - fieldName: 'listTasks', - requestMappingTemplate: appsync.MappingTemplate.fromFile(path.join(__dirname, '../../schema/vtl/DynamoDBScanTable.vtl')), - responseMappingTemplate: appsync.MappingTemplate.fromFile(path.join(__dirname, '../../schema/vtl/DynamoDBScanTableResult.vtl')) - }) - taskDS.createResolver('QueryGetTaskResolver', { typeName: 'Query', fieldName: 'getTask', @@ -425,23 +391,22 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; // Create Lambda Data Source const isDryRun = this.node.tryGetContext('DRY_RUN') - const taskHandlerFn = new lambda.Function(this, 'TaskHandlerFn', { - code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/'), { - exclude: ['cdk/*', 'layer/*'] - }), - runtime: lambda.Runtime.NODEJS_16_X, - handler: 'api/api-task.handler', - timeout: Duration.seconds(10), - memorySize: 512, - environment: { - STATE_MACHINE_ARN: stateMachine.stateMachineArn, - TASK_TABLE: this.taskTable.tableName, - PLUGIN_TEMPLATE_S3EC2: PLUGIN_TEMPLATE_S3EC2, - PLUGIN_TEMPLATE_ECR: PLUGIN_TEMPLATE_ECR, - DRY_RUN: isDryRun ? 'True' : 'False' - }, - layers: [lambdaLayer] - }) + const taskHandlerFn = new lambda.Function(this, 'TaskHandlerFn', { + code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/api/task'), { + }), + runtime: lambda.Runtime.PYTHON_3_9, + description: 'Data Transfer Hub - API V1', + handler: 'api_task_v2.lambda_handler', + timeout: Duration.seconds(60), + memorySize: 512, + environment: { + STATE_MACHINE_ARN: stateMachine.stateMachineArn, + TRANSFER_TASK_TABLE: this.taskTable.tableName, + PLUGIN_TEMPLATE_S3EC2: PLUGIN_TEMPLATE_S3EC2, + PLUGIN_TEMPLATE_ECR: PLUGIN_TEMPLATE_ECR, + DRY_RUN: isDryRun ? 
'True' : 'False'
+      },
+    })
     const cfnTaskHandlerFn = taskHandlerFn.node.defaultChild as lambda.CfnFunction
     addCfnNagSuppressRules(cfnTaskHandlerFn, [
@@ -487,13 +452,6 @@
       responseMappingTemplate: appsync.MappingTemplate.lambdaResult()
     })
 
-    // lambdaDS.createResolver({
-    //   typeName: 'Mutation',
-    //   fieldName: 'updateTaskProgress',
-    //   requestMappingTemplate: appsync.MappingTemplate.lambdaRequest(),
-    //   responseMappingTemplate: appsync.MappingTemplate.lambdaResult()
-    // })
-
     // Create Lambda Data Source for listing secrets
     const secretManagerHandlerFn = new lambda.Function(this, 'SecretManagerHandlerFn', {
       code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/api/secrets_manager'), {
diff --git a/source/constructs/lib/cfn-step-functions.ts b/source/constructs/lib/cfn-step-functions.ts
index c3952e0..d908d0a 100644
--- a/source/constructs/lib/cfn-step-functions.ts
+++ b/source/constructs/lib/cfn-step-functions.ts
@@ -23,7 +23,6 @@ import { addCfnNagSuppressRules } from "./constructs-stack";
 export interface CloudFormationStateMachineProps {
   taskTableName: string,
   taskTableArn: string,
-  lambdaLayer: lambda.LayerVersion,
   taskMonitorSfnArn: string
 }
 
@@ -35,15 +34,13 @@ export class CloudFormationStateMachine extends Construct {
     super(scope, id);
 
     const createTaskCfnFn = new lambda.Function(this, 'CreateTaskCfnFn', {
-      runtime: lambda.Runtime.NODEJS_16_X,
-      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/'), {
-        exclude: ['api/*', 'layer/*']
+      runtime: lambda.Runtime.PYTHON_3_9,
+      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/cdk'), {
       }),
-      handler: 'cdk/cfn-task.createTaskCfn',
+      handler: 'lambda_function.create_task_cfn',
       environment: {
         TASK_TABLE: props.taskTableName
       },
-      layers: [props.lambdaLayer],
       memorySize: 512,
       timeout: Duration.seconds(60),
       description: 'Data Transfer Hub - Create Task'
@@ -58,15 +55,13 @@ export class CloudFormationStateMachine extends Construct {
     const stopTaskCfnFn = new lambda.Function(this, 'StopTaskCfnFn', {
-      runtime: lambda.Runtime.NODEJS_16_X,
-      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/'), {
-        exclude: ['api/*', 'layer/*']
+      runtime: lambda.Runtime.PYTHON_3_9,
+      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/cdk'), {
      }),
-      handler: 'cdk/cfn-task.stopTaskCfn',
+      handler: 'lambda_function.stop_task_cfn',
       environment: {
         TASK_TABLE: props.taskTableName
       },
-      layers: [props.lambdaLayer],
       memorySize: 512,
       timeout: Duration.seconds(60),
       description: 'Data Transfer Hub - Stop Task'
@@ -178,6 +173,7 @@
           "sqs:GetQueueAttributes",
           "sqs:SetQueueAttributes",
           "sqs:DeleteQueue",
+          "sqs:TagQueue",
         ],
         resources: [`arn:${Aws.PARTITION}:sqs:${Aws.REGION}:${Aws.ACCOUNT_ID}:DTH*`]
       }),
@@ -192,9 +188,22 @@
         "ec2:DescribeTags",
         "ec2:CreateSecurityGroup",
         "ec2:DeleteSecurityGroup",
+        "ec2:CreateLaunchTemplate",
+        "ec2:DeleteLaunchTemplate",
+        "ec2:CreateLaunchTemplateVersion",
+        "ec2:DeleteLaunchTemplateVersions",
+        "ec2:GetLaunchTemplateData",
         "ec2:DescribeSecurityGroups",
         "ec2:RevokeSecurityGroupEgress",
         "ec2:AuthorizeSecurityGroupEgress",
+        "ec2:DescribeLaunchTemplates",
+        "ec2:DescribeLaunchTemplateVersions",
+        "ec2:Describe*",
+        "ec2:AuthorizeSecurityGroupIngress",
+        "ec2:RevokeSecurityGroupIngress",
+        "ec2:RunInstances",
+        "ec2:TerminateInstances",
       ],
       resources: [`*`]
     }),
@@ -270,6 +279,8 @@
export class CloudFormationStateMachine extends Construct {
         "iam:DeleteRole",
         "iam:DeleteRolePolicy",
         "iam:DetachRolePolicy",
+        "iam:GetInstanceProfile",
+        "iam:TagRole",
       ],
       resources: [
         `arn:${Aws.PARTITION}:iam::${Aws.ACCOUNT_ID}:instance-profile/DTH*`,
@@ -371,15 +382,13 @@ export class CloudFormationStateMachine extends Construct {
     ])
 
     const queryTaskCfnFn = new lambda.Function(this, 'QueryTaskCfnFn', {
-      runtime: lambda.Runtime.NODEJS_16_X,
-      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/'), {
-        exclude: ['api/*', 'layer/*']
+      runtime: lambda.Runtime.PYTHON_3_9,
+      code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/cdk'), {
       }),
-      handler: 'cdk/cfn-task.queryTaskCfn',
+      handler: 'lambda_function.query_task_cfn',
       environment: {
         TASK_TABLE: props.taskTableName
       },
-      layers: [props.lambdaLayer],
       memorySize: 512,
       timeout: Duration.seconds(60),
       description: 'Data Transfer Hub - Query Task'
@@ -483,20 +492,20 @@ export class CloudFormationStateMachine extends Construct {
 
     const queryStackStatusChoice = new sfn.Choice(this, 'Query Stack Status Choice')
       .when(
-        sfn.Condition.stringEquals('$.stackStatus', 'CREATE_COMPLETE')
+        sfn.Condition.stringEquals('$.stackStatus.S', 'CREATE_COMPLETE')
         , startMonitorFlow.next(stackOperationSucceed))
       .when(
         sfn.Condition.or(
-          sfn.Condition.stringEquals('$.stackStatus', 'UPDATE_COMPLETE'),
-          sfn.Condition.stringEquals('$.stackStatus', 'DELETE_COMPLETE')
+          sfn.Condition.stringEquals('$.stackStatus.S', 'UPDATE_COMPLETE'),
+          sfn.Condition.stringEquals('$.stackStatus.S', 'DELETE_COMPLETE')
         )
         , stackOperationSucceed)
       .when(
         sfn.Condition.or(
-          sfn.Condition.stringEquals('$.stackStatus', 'CREATE_FAILED'),
-          sfn.Condition.stringEquals('$.stackStatus', 'DELETE_FAILED'),
-          sfn.Condition.stringEquals('$.stackStatus', 'UPDATE_ROLLBACK_FAILED'),
-          sfn.Condition.stringEquals('$.stackStatus', 'ROLLBACK_COMPLETE')
+          sfn.Condition.stringEquals('$.stackStatus.S', 'CREATE_FAILED'),
+          sfn.Condition.stringEquals('$.stackStatus.S', 'DELETE_FAILED'),
+          sfn.Condition.stringEquals('$.stackStatus.S', 'UPDATE_ROLLBACK_FAILED'),
+          sfn.Condition.stringEquals('$.stackStatus.S', 'ROLLBACK_COMPLETE')
        )
        , stackOperationFailed
      )
@@ -526,12 +535,13 @@ export class CloudFormationStateMachine extends Construct {
       }
     ])
     const stateMachine = new sfn.StateMachine(this, 'CfnDeploymentStateMachine', {
-      definition: definition,
+      definitionBody: sfn.DefinitionBody.fromChainable(definition),
       timeout: Duration.minutes(120),
       logs: {
         destination: logGroup,
         level: sfn.LogLevel.ALL,
-      }
+      },
+      tracingEnabled: true,
     })
 
     new CfnOutput(this, 'CfnDeploymentStateMachineArn', {
diff --git a/source/constructs/lib/constructs-stack.ts b/source/constructs/lib/constructs-stack.ts
index 79d7dd9..86156fe 100644
--- a/source/constructs/lib/constructs-stack.ts
+++ b/source/constructs/lib/constructs-stack.ts
@@ -39,22 +39,28 @@ export function addCfnNagSuppressRules(resource: CfnResource, rules: CfnNagSuppr
   });
 }
 
+export interface ConstructsProps extends StackProps {
+  /**
+   * Indicates the auth type that the main stack uses
+   */
+  authType?: AuthType;
+}
+
 /**
 * @class ConstructsStack
 */
 export class ConstructsStack extends Stack {
-  constructor(scope: Construct, id: string, props?: StackProps) {
+  constructor(scope: Construct, id: string, props: ConstructsProps) {
     super(scope, id, props);
 
-    const authType = this.node.tryGetContext('authType') || AuthType.COGNITO
     let usernameParameter: CfnParameter | null = null;
    let oidcProvider: CfnParameter | null = null;
    let oidcClientId: CfnParameter |
null = null;
    let oidcCustomerDomain: CfnParameter | null = null;
 
    // CFN parameters
-    if (authType === AuthType.OPENID) {
+    if (props.authType === AuthType.OPENID) {
      oidcProvider = new CfnParameter(this, 'OidcProvider', {
        type: 'String',
        description: 'Open Id Connector Provider Issuer',
@@ -128,10 +134,11 @@ export class ConstructsStack extends Stack {
     const taskCluster = new TaskCluster(this, 'TaskCluster')
 
     // API props
+    const authTypeString: string = props.authType || AuthType.COGNITO;
     const drhProps: ApiProps = {
-      authType,
-      oidcProvider,
-      usernameParameter,
+      authType: authTypeString,
+      oidcProvider: oidcProvider,
+      usernameParameter: usernameParameter,
     }
     // API Stack
     const apiStack = new ApiStack(this, 'API', drhProps);
@@ -145,7 +152,7 @@ export class ConstructsStack extends Stack {
     );
     // Portal - S3 Static Website
     const portal = new PortalStack(this, 'Portal', {
-      auth_type: authType,
+      auth_type: authTypeString,
       aws_oidc_customer_domain: oidcCustomerDomain?.valueAsString || '',
       aws_oidc_provider: oidcProvider?.valueAsString || '',
       aws_oidc_client_id: oidcClientId?.valueAsString || '',
diff --git a/source/constructs/lib/ecr-plugin/ecr-plugin-stack.ts b/source/constructs/lib/ecr-plugin/ecr-plugin-stack.ts
new file mode 100644
index 0000000..692d8e0
--- /dev/null
+++ b/source/constructs/lib/ecr-plugin/ecr-plugin-stack.ts
@@ -0,0 +1,796 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+import {
+  Construct,
+} from 'constructs';
+import {
+  Aws,
+  Fn,
+  Stack,
+  StackProps,
+  CfnParameter,
+  CfnCondition,
+  Duration,
+  CfnResource,
+  RemovalPolicy,
+  aws_iam as iam,
+  aws_dynamodb as ddb,
+  aws_sns as sns,
+  aws_kms as kms,
+  aws_stepfunctions as sfn,
+  aws_stepfunctions_tasks as tasks,
+  aws_lambda as lambda,
+  aws_ecs as ecs,
+  aws_ec2 as ec2,
+  aws_ssm as ssm,
+  aws_sns_subscriptions as subscriptions,
+  aws_events as events,
+  aws_events_targets as targets,
+  aws_secretsmanager as secretsmanager,
+  aws_logs as logs,
+  custom_resources as cr
+} from 'aws-cdk-lib';
+
+import * as path from 'path';
+
+const { VERSION } = process.env;
+
+/**
+ * cfn-nag suppression rule interface
+ */
+interface CfnNagSuppressRule {
+  readonly id: string;
+  readonly reason: string;
+}
+
+
+export function addCfnNagSuppressRules(resource: CfnResource, rules: CfnNagSuppressRule[]) {
+  resource.addMetadata('cfn_nag', {
+    rules_to_suppress: rules
+  });
+}
+
+/***
+ * Main Stack
+ */
+export class DataTransferECRStack extends Stack {
+  private paramGroups: any[] = [];
+  private paramLabels: any = {};
+
+  private addToParamGroups(label: string, ...param: string[]) {
+    this.paramGroups.push({
+      Label: { default: label },
+      Parameters: param
+
+    });
+  };
+
+  private addToParamLabels(label: string, param: string) {
+    this.paramLabels[param] = {
+      default: label
+    }
+  }
+
+
+  constructor(scope: Construct, id: string, props?: StackProps) {
+    super(scope, id, props);
+
+    // The code that defines your stack goes here
+
+    const sourceType = new CfnParameter(this, 'sourceType', {
+      description: 'Choose the type of source container registry, for example Amazon_ECR, or Public from Docker Hub, gcr.io, etc.',
+      type: 'String',
+      default: 'Amazon_ECR',
+      allowedValues: ['Amazon_ECR', 'Public']
+    })
+    this.addToParamLabels('Source Type', sourceType.logicalId)
+
+    // Only required for ECR
+    const srcRegion = new CfnParameter(this, 'srcRegion', {
+      description: 'Source Region Name (only required if source type is Amazon ECR), for example, us-west-1',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Source Region Name', srcRegion.logicalId)
+
+    // Only required for ECR
+    const srcAccountId = new CfnParameter(this, 'srcAccountId', {
+      description: 'Source AWS Account ID (only required if source type is Amazon ECR), leave it blank if the source is in the current account',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Source AWS Account ID', srcAccountId.logicalId)
+
+    const srcList = new CfnParameter(this, 'srcList', {
+      description: 'Type of Source Image List, either ALL or SELECTED; for a public registry, please use SELECTED only',
+      type: 'String',
+      default: 'ALL',
+      allowedValues: ['ALL', 'SELECTED']
+    })
+    this.addToParamLabels('Source Image List Type', srcList.logicalId)
+
+    const srcImageList = new CfnParameter(this, 'srcImageList', {
+      description: 'Selected Image List delimited by comma, for example, ubuntu:latest,alpine:latest..., leave it blank if the Type is ALL. For an ECR source, use the ALL_TAGS tag to get all tags.',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Source Image List', srcImageList.logicalId)
+
+    // Currently, only required if the source type is ECR
+    const srcCredential = new CfnParameter(this, 'srcCredential', {
+      description: 'The secret name in Secrets Manager, only needed when using AK/SK credentials to pull images from a source Amazon ECR; leave it blank for a public registry',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Source Credentials', srcCredential.logicalId)
+
+
+    const destRegion = new CfnParameter(this, 'destRegion', {
+      description: 'Destination Region Name, for example, cn-north-1',
+      type: 'String',
+    })
+    this.addToParamLabels('Destination Region Name', destRegion.logicalId)
+
+    const destAccountId = new CfnParameter(this, 'destAccountId', {
+      description: 'Destination AWS Account ID, leave it blank if the destination is in the current account',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Destination AWS Account ID', destAccountId.logicalId)
+
+    const destPrefix = new CfnParameter(this, 'destPrefix', {
+      description: 'Destination Repo Prefix',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Destination Repo Prefix', destPrefix.logicalId)
+
+    const destCredential = new CfnParameter(this, 'destCredential', {
+      description: 'The secret name in Secrets Manager, only needed when using AK/SK credentials to push images to a destination Amazon ECR',
+      type: 'String',
+      default: '',
+    })
+    this.addToParamLabels('Destination Credentials', destCredential.logicalId)
+
+    const includeUntagged = new CfnParameter(this, 'includeUntagged', {
+      description: 'Whether to include untagged images in the replication',
+      default: "true",
+      type: "String",
+      allowedValues: ["true", "false"]
+    })
+    this.addToParamLabels('Include Untagged', includeUntagged.logicalId)
+
+    const ecsClusterName = new CfnParameter(this, 'ecsClusterName', {
+      description: 'ECS Cluster Name to run ECS task (Please make sure the cluster exists)',
+      type: 'String'
+    })
+    this.addToParamLabels('ECS Cluster Name', ecsClusterName.logicalId)
+
+    const ecsVpcId = new CfnParameter(this, 'ecsVpcId', {
+      description: 'VPC ID to run ECS task, e.g. vpc-bef13dc7',
+      type: 'AWS::EC2::VPC::Id'
+    })
+    this.addToParamLabels('VPC ID', ecsVpcId.logicalId)
+
+    const ecsSubnetA = new CfnParameter(this, 'ecsSubnetA', {
+      description: 'First Subnet ID to run ECS task, e.g. 
subnet-97bfc4cd', + type: 'AWS::EC2::Subnet::Id' + }) + this.addToParamLabels('First Subnet ID', ecsSubnetA.logicalId) + + const ecsSubnetB = new CfnParameter(this, 'ecsSubnetB', { + description: 'Second Subnet ID to run ECS task, e.g. subnet-7ad7de32', + type: 'AWS::EC2::Subnet::Id' + }) + this.addToParamLabels('Second Subnet ID', ecsSubnetB.logicalId) + + const alarmEmail = new CfnParameter(this, 'alarmEmail', { + description: 'Alarm Email address to receive notification in case of any failure', + // default: '', + allowedPattern: '\\w[-\\w.+]*@([A-Za-z0-9][-A-Za-z0-9]+\\.)+[A-Za-z]{2,14}', + type: 'String', + }) + this.addToParamLabels('Alarm Email', alarmEmail.logicalId) + + this.addToParamGroups('Type', sourceType.logicalId) + this.addToParamGroups('Source Information', srcRegion.logicalId, srcAccountId.logicalId, srcList.logicalId, srcImageList.logicalId, srcCredential.logicalId) + this.addToParamGroups('Destination Information', destRegion.logicalId, destAccountId.logicalId, destPrefix.logicalId, destCredential.logicalId) + this.addToParamGroups('ECS Cluster Information', ecsClusterName.logicalId, ecsVpcId.logicalId, ecsSubnetA.logicalId, ecsSubnetB.logicalId) + this.addToParamGroups('Notification Information', alarmEmail.logicalId) + + this.templateOptions.description = `(SO8003) - Data Transfer Hub - ECR Plugin - Template version ${VERSION}`; + + this.templateOptions.metadata = { + 'AWS::CloudFormation::Interface': { + ParameterGroups: this.paramGroups, + ParameterLabels: this.paramLabels, + } + } + + const isSelectedImage = new CfnCondition(this, 'isSelectedImage', { + expression: Fn.conditionEquals('SELECTED', srcList), + }); + + + const isSrcInCurrentAccount = new CfnCondition(this, 'isSrcInCurrentAccount', { + expression: Fn.conditionAnd( + // Source Account ID is blank + Fn.conditionEquals('', srcAccountId), + // Source Type is Amazon ECR + Fn.conditionEquals('Amazon_ECR', sourceType)), + + }); + + const isDestInCurrentAccount = new CfnCondition(this, 'isDestInCurrentAccount', { + // Destination in Current Account + expression: Fn.conditionEquals('', destAccountId), + }); + + const selectedImages = Fn.conditionIf(isSelectedImage.logicalId, srcImageList.valueAsString, 'Not Applicable').toString(); + + + // Set up SSM for selected image list + const selectedImageParam = new ssm.StringParameter(this, 'selectedImageParam', { + description: `Parameter to store the selected image list delimited by comma for stack ${Aws.STACK_NAME}`, + // parameterName: 'SelectedImageList', + stringValue: selectedImages, + }); + + + // Setup DynamoDB + const imageTable = new ddb.Table(this, 'ECRTransferTable', { + partitionKey: { name: 'Image', type: ddb.AttributeType.STRING }, + sortKey: { name: 'Tag', type: ddb.AttributeType.STRING }, + billingMode: ddb.BillingMode.PAY_PER_REQUEST, + removalPolicy: RemovalPolicy.DESTROY, + pointInTimeRecovery: true, + }) + + const cfnTable = imageTable.node.defaultChild as ddb.CfnTable + addCfnNagSuppressRules(cfnTable, [ + { + id: 'W74', + reason: 'This table is set to use DEFAULT encryption, the key is owned by DDB.' 
+ }, + ]) + + const listImagesLambda = new lambda.Function(this, 'ListImagesFunction', { + code: lambda.AssetCode.fromAsset(path.join(__dirname, '../../lambda/plugin/ecr/ecr_helper'), { + }), + runtime: lambda.Runtime.PYTHON_3_9, + handler: 'lambda_function.lambda_handler', + memorySize: 256, + timeout: Duration.minutes(15), + description: 'Data Transfer Hub ECR Plugin - List Image Handler', + environment: { + SOURCE_TYPE: sourceType.valueAsString, + SRC_ACCOUNT_ID: srcAccountId.valueAsString, + SRC_LIST: srcList.valueAsString, + SRC_REGION: srcRegion.valueAsString, + SRC_CREDENTIAL_NAME: srcCredential.valueAsString, + SELECTED_IMAGE_PARAM: selectedImageParam.parameterName, + INCLUDE_UNTAGGED: includeUntagged.valueAsString, + } + }); + + const srcSecretParam = secretsmanager.Secret.fromSecretNameV2(this, 'srcSecretParam', srcCredential.valueAsString); + const desSecretParam = secretsmanager.Secret.fromSecretNameV2(this, 'desSecretParam', destCredential.valueAsString); + + listImagesLambda.addToRolePolicy( + new iam.PolicyStatement({ + actions: [ + "ecr:DescribeRepositories", + "ecr:DescribeImages", + ], + resources: [ + `arn:${Aws.PARTITION}:ecr:${srcRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` + ] + }) + ); + + selectedImageParam.grantRead(listImagesLambda); + srcSecretParam.grantRead(listImagesLambda); + + const vpc = ec2.Vpc.fromVpcAttributes(this, 'ECSVpc', { + vpcId: ecsVpcId.valueAsString, + availabilityZones: Fn.getAzs(), + publicSubnetIds: [ecsSubnetA.valueAsString, ecsSubnetB.valueAsString] + }) + + const cluster = ecs.Cluster.fromClusterAttributes(this, 'ECSCluster', { + clusterName: ecsClusterName.valueAsString, + vpc: vpc, + securityGroups: [] + }) + + const containerlogGroup = new logs.LogGroup(this, `DTH-ECR-Container-LogGroup`, { + retention: 365 + }); + const cfncontainerlogGroup = containerlogGroup.node.defaultChild as logs.CfnLogGroup + addCfnNagSuppressRules(cfncontainerlogGroup, [ + { + id: 'W84', + reason: 'Log group data is always encrypted in CloudWatch Logs using an AWS Managed KMS Key' + }, + ]) + + // Create ECS executionRole and executionPolicy + const ecsTaskExecutionRole = new iam.Role(this, `DTH-ECR-TaskExecutionRole`, { + assumedBy: new iam.ServicePrincipal('ecs-tasks.amazonaws.com') + }); + + const taskExecutionPolicy = new iam.Policy(this, 'TaskExecutionPolicy', { + policyName: `${Aws.STACK_NAME}TaskExecutionPolicy`, + statements: [ + new iam.PolicyStatement({ + actions: [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + resources: [ + containerlogGroup.logGroupArn + ] + }), + ] + }); + taskExecutionPolicy.node.addDependency(containerlogGroup); + taskExecutionPolicy.attachToRole(ecsTaskExecutionRole); + + const taskDefinition = new ecs.TaskDefinition(this, 'ECRTransferTask', { + memoryMiB: '1024', + cpu: '512', + compatibility: ecs.Compatibility.FARGATE, + family: `${Aws.STACK_NAME}-ECRTransferTask`, + executionRole: ecsTaskExecutionRole.withoutPolicyUpdates() + }); + srcSecretParam.grantRead(taskDefinition.taskRole); + desSecretParam.grantRead(taskDefinition.taskRole); + + const ecrRegistry = 'public.ecr.aws/aws-gcr-solutions' + const ecrImageName = 'data-transfer-hub-ecr' + const ecrImageTag = VERSION + + const ecrImageUrl = `${ecrRegistry}/${ecrImageName}:${ecrImageTag}` + + const containerDefinition = taskDefinition.addContainer('DefaultContainer', { + image: ecs.ContainerImage.fromRegistry(ecrImageUrl), + environment: { + SOURCE_TYPE: sourceType.valueAsString, + AWS_DEFAULT_REGION: this.region, + AWS_ACCOUNT_ID: this.account, + 
SRC_REGION: srcRegion.valueAsString, + SRC_ACCOUNT_ID: srcAccountId.valueAsString, + SRC_CREDENTIAL_NAME: srcCredential.valueAsString, + DEST_REGION: destRegion.valueAsString, + DEST_ACCOUNT_ID: destAccountId.valueAsString, + DEST_PREFIX: destPrefix.valueAsString, + DEST_CREDENTIAL_NAME: destCredential.valueAsString, + + }, + logging: ecs.LogDrivers.awsLogs({ + streamPrefix: 'DTH-ECR', + logGroup: containerlogGroup, + }) + }); + + + const ecrSrcReadOnlyPolicy = new iam.Policy(this, 'ECRSrcReadOnlyPolicy', { + policyName: `${Aws.STACK_NAME}ECRSrcReadOnlyPolicy`, + statements: [ + new iam.PolicyStatement({ + actions: [ + "ecr:GetAuthorizationToken", + ], + resources: [ + '*' + ] + }), + new iam.PolicyStatement({ + actions: [ + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + ], + resources: [ + `arn:${Aws.PARTITION}:ecr:${srcRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` + + ] + }), + ] + }); + + const cfnecrSrcReadOnlyPolicy = ecrSrcReadOnlyPolicy.node.defaultChild as iam.CfnPolicy + addCfnNagSuppressRules(cfnecrSrcReadOnlyPolicy, [ + { + id: 'W12', + reason: 'This IAM policy needs * resource' + }, + ]) + + const ecrSrcPolicy = ecrSrcReadOnlyPolicy.node.defaultChild as iam.CfnPolicy + ecrSrcPolicy.cfnOptions.condition = isSrcInCurrentAccount + + ecrSrcReadOnlyPolicy.attachToRole(taskDefinition.taskRole); + + const ecrDestWritePolicy = new iam.Policy(this, 'ECRDestWritePolicy', { + policyName: `${Aws.STACK_NAME}ECRDestWritePolicy`, + statements: [ + new iam.PolicyStatement({ + actions: [ + "ecr:GetAuthorizationToken", + ], + resources: [ + '*' + ] + }), + new iam.PolicyStatement({ + actions: [ + "ecr:CreateRepository", + "ecr:CompleteLayerUpload", + "ecr:UploadLayerPart", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + ], + resources: [ + `arn:${Aws.PARTITION}:ecr:${destRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` + + ] + }), + ] + }); + const cfnecrDestWritePolicy = ecrDestWritePolicy.node.defaultChild as iam.CfnPolicy + addCfnNagSuppressRules(cfnecrDestWritePolicy, [ + { + id: 'W12', + reason: 'This IAM policy needs * resource' + }, + ]) + + const ecrDestPolicy = ecrDestWritePolicy.node.defaultChild as iam.CfnPolicy + ecrDestPolicy.cfnOptions.condition = isDestInCurrentAccount + ecrDestWritePolicy.attachToRole(taskDefinition.taskRole); + + + const submitJob = new tasks.LambdaInvoke(this, 'Submit Lambda', { + lambdaFunction: listImagesLambda, + // Lambda's result is in the attribute `Payload` + outputPath: '$.Payload' + }); + + const clusterSG = new ec2.SecurityGroup(this, 'clusterSG', { + allowAllOutbound: true, + description: `SG for ${Aws.STACK_NAME} Fargate Tasks`, + vpc: vpc, + }); + const cfnclusterSG = clusterSG.node.defaultChild as ec2.CfnSecurityGroup + addCfnNagSuppressRules(cfnclusterSG, [ + { + id: 'W5', + reason: 'Egress of 0.0.0.0/0 is required' + }, + { + id: 'W40', + reason: 'Egress IPProtocol of -1 is required' + }, + ]) + + const runTask = new tasks.EcsRunTask(this, 'Run Fargate Task', { + integrationPattern: sfn.IntegrationPattern.RUN_JOB, + cluster, + taskDefinition, + assignPublicIp: true, + containerOverrides: [{ + containerDefinition, + environment: [ + { name: 'IMAGE', value: sfn.JsonPath.stringAt('$.repositoryName') }, + { name: 'TAG', value: sfn.JsonPath.stringAt('$.imageTag') }, + { name: 'MULTI_ARCH_OPTION', value: sfn.JsonPath.stringAt('$.multiArchOption') }, + ], + }], + launchTarget: new 
tasks.EcsFargateLaunchTarget(), + resultPath: '$.result', + securityGroups: [clusterSG] + }); + + + const putSuccessInDDBTask = new tasks.DynamoPutItem(this, 'Log Success in DynamoDB', { + item: { + Image: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.repositoryName')), + Tag: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.imageTag')), + MultiArchOption: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.multiArchOption')), + Execution: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$$.Execution.Name')), + Status: tasks.DynamoAttributeValue.fromString('Done'), + }, + table: imageTable, + returnValues: tasks.DynamoReturnValues.NONE, + resultPath: '$.result' + }); + + const putFailureInDDBTask = new tasks.DynamoPutItem(this, 'Log Failure in DynamoDB', { + item: { + Image: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.repositoryName')), + Tag: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.imageTag')), + MultiArchOption: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.multiArchOption')), + Execution: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$$.Execution.Name')), + ErrorMessage: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.result.Error')), + Status: tasks.DynamoAttributeValue.fromString('Error'), + }, + table: imageTable, + returnValues: tasks.DynamoReturnValues.NONE, + resultPath: '$.result' + }); + + const myKeyAlias = kms.Alias.fromAliasName(this, 'AwsSnsDefaultKey', 'alias/aws/sns'); + + const topic = new sns.Topic(this, + 'EcrReplicationTopic', + { + masterKey: myKeyAlias, + } + ); + topic.addSubscription(new subscriptions.EmailSubscription(alarmEmail.valueAsString)); + + const snsTask = new tasks.SnsPublish(this, 'Publish To SNS', { + topic, + integrationPattern: sfn.IntegrationPattern.REQUEST_RESPONSE, + message: sfn.TaskInput.fromObject({ + error: "Failed to copy image", + execution: sfn.JsonPath.stringAt('$$.Execution.Name'), + image: sfn.JsonPath.stringAt('$.repositoryName'), + tag: sfn.JsonPath.stringAt('$.imageTag'), + multiArchOption: sfn.JsonPath.stringAt('$.multiArchOption'), + }) + }); + + const endState = new sfn.Pass(this, 'EndState'); + + const map = new sfn.Map(this, 'Map State', { + maxConcurrency: 10, + itemsPath: sfn.JsonPath.stringAt('$.Payload'), + }); + + const retryParam: sfn.RetryProps = { + backoffRate: 2, + interval: Duration.seconds(60), + maxAttempts: 3, + } + + map.iterator(runTask + .addRetry(retryParam) + .addCatch(putFailureInDDBTask.next(snsTask), { resultPath: '$.result' }) + .next(putSuccessInDDBTask)); + + submitJob.next(map).next(endState) + + const logGroup = new logs.LogGroup(this, `DTH-ECR-StepFunction-LogGroup`,{ + logGroupName: `/aws/vendedlogs/states/${Fn.select(6, Fn.split(":", listImagesLambda.functionArn))}-SM-log` + }); + + // Create role for the state machine + const ecrStateMachineRole = new iam.Role(this, `DTH-ECR-ecrStateMachineRole`, { + assumedBy: new iam.ServicePrincipal('states.amazonaws.com') + }); + + const taskDefArnNoVersion = Stack.of(this).formatArn({ + service: 'ecs', + resource: 'task-definition', + resourceName: taskDefinition.family + }) + + const ecrStateMachineRolePolicy = new iam.Policy(this, 'ecrStateMachineRolePolicy'); + + ecrStateMachineRolePolicy.addStatements( + new iam.PolicyStatement({ + actions: [ + 'lambda:InvokeFunction' + ], + resources: [ + listImagesLambda.functionArn + ] + }), + new iam.PolicyStatement({ + actions: [ + 'ecs:RunTask' + ], + resources: [ + 
taskDefArnNoVersion + ] + }), + new iam.PolicyStatement({ + actions: [ + "ecs:StopTask", + "ecs:DescribeTasks" + ], + resources: [ + '*' + ] + }), + new iam.PolicyStatement({ + actions: [ + "iam:PassRole" + ], + resources: [ + taskDefinition.taskRole.roleArn, + taskDefinition.executionRole!.roleArn + ] + }), + new iam.PolicyStatement({ + actions: [ + "dynamodb:PutItem" + ], + resources: [ + imageTable.tableArn + ] + }), + new iam.PolicyStatement({ + actions: [ + "sns:Publish" + ], + resources: [ + topic.topicArn + ] + }), + new iam.PolicyStatement({ + actions: [ + "events:PutTargets", + "events:PutRule", + "events:DescribeRule" + ], + resources: [ + `arn:${Aws.PARTITION}:events:${Aws.REGION}:${Aws.ACCOUNT_ID}:rule/StepFunctionsGetEventsForECSTaskRule`, + ] + }), + new iam.PolicyStatement({ + actions: [ + 'logs:CreateLogDelivery', + 'logs:GetLogDelivery', + 'logs:UpdateLogDelivery', + 'logs:DeleteLogDelivery', + 'logs:ListLogDeliveries', + 'logs:PutResourcePolicy', + 'logs:DescribeResourcePolicies', + 'logs:DescribeLogGroups' + ], + resources: [ + '*' + ] + }), + ); + ecrStateMachineRolePolicy.node.addDependency(listImagesLambda, taskDefinition, imageTable, topic, logGroup); + ecrStateMachineRolePolicy.attachToRole(ecrStateMachineRole); + const cfnecrStateMachineRolePolicy = ecrStateMachineRolePolicy.node.defaultChild as iam.CfnPolicy + addCfnNagSuppressRules(cfnecrStateMachineRolePolicy, [ + { + id: 'W12', + reason: '[*] Access granted as per documentation: https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html' + }, + { + id: 'W76', + reason: 'SPCM complexity greater than 25 is appropriate for the logic implemented' + } + ]) + + const ecrStateMachine = new sfn.StateMachine(this, 'ECRReplicationStateMachine', { + stateMachineName: `${Aws.STACK_NAME}-ECRReplicationSM`, + role: ecrStateMachineRole.withoutPolicyUpdates(), + definitionBody: sfn.DefinitionBody.fromChainable(submitJob), + logs: { + destination: logGroup, + level: sfn.LogLevel.ALL, + }, + tracingEnabled: true, + }); + const cfnlogGroup = logGroup.node.defaultChild as logs.CfnLogGroup + addCfnNagSuppressRules(cfnlogGroup, [ + { + id: 'W84', + reason: 'Log group data is always encrypted in CloudWatch Logs using an AWS Managed KMS Key' + }, + ]) + + ecrStateMachine.node.addDependency(containerDefinition, taskDefinition, submitJob, logGroup, ecrStateMachineRole, ecrStateMachineRolePolicy) + + const smRuleRole = new iam.Role(this, 'ECRReplicationSMExecRole', { + assumedBy: new iam.ServicePrincipal('events.amazonaws.com'), + }) + smRuleRole.addToPolicy(new iam.PolicyStatement({ + actions: [ + "states:StartExecution", + ], + resources: [ + ecrStateMachine.stateMachineArn, + ] + })) + + const ecrStateMachineTarget = new targets.SfnStateMachine(ecrStateMachine, { role: smRuleRole }); + const smRule = new events.Rule(this, 'ECRReplicationScheduleRule', { + schedule: events.Schedule.rate(Duration.days(1)), + targets: [ecrStateMachineTarget], + }); + smRule.node.addDependency(ecrStateMachine, smRuleRole) + + const checkExecutionLambdaPolicy = new iam.Policy(this, 'CheckExecutionLambdaPolicy', { + policyName: `${Aws.STACK_NAME}CheckExecutionLambdaPolicy`, + statements: [ + new iam.PolicyStatement({ + actions: [ + "states:StartExecution", + "states:ListExecutions", + "states:ListStateMachines", + "states:DescribeExecution", + "states:DescribeStateMachineForExecution", + "states:GetExecutionHistory", + "states:ListActivities", + "states:DescribeStateMachine", + "states:DescribeActivity", + ], + resources: [ + '*' + ] + }), + ] + }); 
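Editor's note: the hunk above composes a common Step Functions fan-out pattern: a Lambda lists the images, a Map state iterates the list with bounded concurrency, each Fargate task is retried with backoff, and failures are caught and recorded (DynamoDB plus SNS) instead of aborting the whole execution. The following is a minimal, self-contained sketch of that retry/catch/Map pattern; it is not part of this PR, and the names (`DemoOrchestration`, `listFn`, `workFn`) are illustrative only.

```ts
// Minimal sketch of the retry/catch/Map pattern used by the ECR plugin above.
// All identifiers here are hypothetical; only the construct APIs are real.
import { Construct } from 'constructs';
import {
  Duration,
  aws_lambda as lambda,
  aws_stepfunctions as sfn,
  aws_stepfunctions_tasks as tasks,
} from 'aws-cdk-lib';

export class DemoOrchestration extends Construct {
  constructor(scope: Construct, id: string, listFn: lambda.IFunction, workFn: lambda.IFunction) {
    super(scope, id);

    // Step 1: list the work items; the Lambda result arrives under $.Payload.
    const submitJob = new tasks.LambdaInvoke(this, 'List Items', {
      lambdaFunction: listFn,
      outputPath: '$.Payload', // assume the Lambda returns a JSON array
    });

    // Step 2: process one item, retrying transient failures with backoff.
    const workTask = new tasks.LambdaInvoke(this, 'Process Item', {
      lambdaFunction: workFn,
      outputPath: '$.Payload',
    });

    // A failed item is recorded instead of failing the whole execution.
    const recordFailure = new sfn.Pass(this, 'Record Failure');

    // Fan out over the array with bounded concurrency.
    const map = new sfn.Map(this, 'For Each Item', { maxConcurrency: 10 });
    map.iterator(
      workTask
        .addRetry({ backoffRate: 2, interval: Duration.seconds(60), maxAttempts: 3 })
        .addCatch(recordFailure, { resultPath: '$.error' })
    );

    new sfn.StateMachine(this, 'DemoSM', {
      definitionBody: sfn.DefinitionBody.fromChainable(submitJob.next(map)),
    });
  }
}
```

Catching at the iterator level is the design choice that matters here: one bad image cannot fail the entire scheduled replication run, which is why the hunk routes failures to a DynamoDB record and an SNS alarm rather than letting the Map state fail.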
+ + const cfncheckExecutionLambdaPolicy = checkExecutionLambdaPolicy.node.defaultChild as iam.CfnPolicy + addCfnNagSuppressRules(cfncheckExecutionLambdaPolicy, [ + { + id: 'W12', + reason: 'This IAM policy needs * resource' + }, + ]) + + const checkExecutionLambdaRole = new iam.Role(this, 'CheckExecutionFunctionRole', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), + }) + + const checkExecutionLambda = new lambda.Function(this, 'CheckExecutionFunction', { + runtime: lambda.Runtime.PYTHON_3_9, + handler: 'lambda_function.lambda_handler', + code: lambda.Code.fromAsset(path.join(__dirname, '../../lambda/plugin/ecr/sfn_helper')), + memorySize: 256, + timeout: Duration.minutes(15), + environment: { + STATE_MACHINE_ARN: ecrStateMachine.stateMachineArn + }, + role: checkExecutionLambdaRole.withoutPolicyUpdates() + }); + checkExecutionLambda.node.addDependency(checkExecutionLambdaRole, checkExecutionLambdaPolicy) + + checkExecutionLambdaPolicy.attachToRole(checkExecutionLambda.role!) + ecrStateMachine.grantStartExecution(checkExecutionLambda) + ecrStateMachine.grantRead(checkExecutionLambda) + + // Run checkExecutionLambda on Create + const lambdaTrigger = new cr.AwsCustomResource(this, 'StatefunctionTrigger', { + policy: cr.AwsCustomResourcePolicy.fromStatements([new iam.PolicyStatement({ + actions: ['lambda:InvokeFunction'], + effect: iam.Effect.ALLOW, + resources: [checkExecutionLambda.functionArn] + })]), + timeout: Duration.minutes(15), + onCreate: { + service: 'Lambda', + action: 'invoke', + parameters: { + FunctionName: checkExecutionLambda.functionName, + InvocationType: 'Event' + }, + physicalResourceId: cr.PhysicalResourceId.of('JobSenderTriggerPhysicalId') + }, + onUpdate: { + service: 'Lambda', + action: 'invoke', + parameters: { + FunctionName: checkExecutionLambda.functionName, + InvocationType: 'Event' + }, + physicalResourceId: cr.PhysicalResourceId.of('JobSenderTriggerPhysicalId') + } + }) + lambdaTrigger.node.addDependency(ecrStateMachine, smRule) + } +} diff --git a/source/constructs/lib/monitor-step-functions.ts b/source/constructs/lib/monitor-step-functions.ts index ad6ff0e..bc8182c 100644 --- a/source/constructs/lib/monitor-step-functions.ts +++ b/source/constructs/lib/monitor-step-functions.ts @@ -112,6 +112,15 @@ export class MonitorStateMachine extends Construct { resources: [ `*`, ] + }), + new iam.PolicyStatement({ + actions: [ + "states:ListExecutions", + "states:ListStateMachines" + ], + resources: [ + `*`, + ] }) ] }); @@ -439,7 +448,7 @@ export class MonitorStateMachine extends Construct { scareDownAsgTask.next(sendSnsNotificationTask .next(taskMonitoringComplete)) - const checkSqsEmptyChoice = new sfn.Choice(this, 'SQS is Empty?') + const checkSqsEmptyChoice = new sfn.Choice(this, 'SQS and SFN are Empty?') .when(sfn.Condition.stringEquals('$.isEmpty', 'true'), new sfn.Choice(this, 'Has Checked 3 times?') .when( @@ -483,11 +492,12 @@ export class MonitorStateMachine extends Construct { } ]) const taskMonitorStateMachine = new sfn.StateMachine(this, 'taskMonitorStateMachine', { - definition: definition, + definitionBody: sfn.DefinitionBody.fromChainable(definition), logs: { destination: logGroup, level: sfn.LogLevel.ALL, - } + }, + tracingEnabled: true, }) this.taskMonitorStateMachineArn = taskMonitorStateMachine.stateMachineArn diff --git a/source/constructs/lib/portal-stack.ts b/source/constructs/lib/portal-stack.ts index 5f7cc29..e4a416f 100644 @@ -5,9 +5,6 @@ 
import { Construct, } from 'constructs'; import { - CfnCondition, - CfnResource, - CfnCustomResource, RemovalPolicy, Duration, Aws, @@ -16,6 +13,7 @@ import { aws_s3_deployment as s3Deployment, aws_iam as iam, aws_lambda as lambda, + custom_resources as cr, } from 'aws-cdk-lib'; import { CloudFrontToS3 } from '@aws-solutions-constructs/aws-cloudfront-s3'; @@ -25,14 +23,6 @@ import { NagSuppressions } from 'cdk-nag'; // const { BUCKET_NAME, SOLUTION_NAME, VERSION } = process.env -/** - * Custom resource config interface - */ -interface CustomResourceConfig { - readonly properties?: { path: string, value: any }[]; - readonly condition?: CfnCondition; - readonly dependencies?: CfnResource[]; -} export interface PortalStackProps { auth_type: string, @@ -172,6 +162,16 @@ function handler(event) { resources: [ `arn:${Aws.PARTITION}:s3:::*` ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'cloudfront:GetInvalidation', + 'cloudfront:CreateInvalidation', + ], + resources: [ + `arn:${Aws.PARTITION}:cloudfront::${Aws.ACCOUNT_ID}:distribution/${website.cloudFrontWebDistribution.distributionId}`, + ], }) ] }); @@ -179,29 +179,39 @@ function handler(event) { const cfnCustomResourcePolicy = customResourcePolicy.node.defaultChild as iam.CfnPolicy; cfnCustomResourcePolicy.overrideLogicalId('CustomResourcePolicy'); + new s3Deployment.BucketDeployment(this, 'DeployWebsite', { + sources: [s3Deployment.Source.asset(path.join(__dirname, '../../portal/build'))], + destinationBucket: websiteBucket, + // disable this, otherwise the aws-exports.json will be deleted + prune: false, + }) + + this.websiteURL = website.cloudFrontWebDistribution.distributionDomainName + + const customResourceFunction = new lambda.Function(this, 'CustomHandler', { - description: 'Data Transfer Hub - Custom resource', - runtime: lambda.Runtime.NODEJS_16_X, - handler: 'index.handler', + description: `${Aws.STACK_NAME} - Custom resource`, + runtime: lambda.Runtime.PYTHON_3_9, + handler: 'lambda_function.lambda_handler', timeout: Duration.seconds(30), memorySize: 512, role: customResourceRole, - code: lambda.Code.fromAsset(path.join(__dirname, '../../custom-resource/'), - { - bundling: { - image: lambda.Runtime.NODEJS_16_X.bundlingImage, - command: [ - 'bash', '-c', [ - `cd /asset-output/`, - `cp -r /asset-input/* /asset-output/`, - `cd /asset-output/`, - `npm install` - ].join(' && ') - ], - user: 'root' - } - } - ) + code: lambda.Code.fromAsset(path.join(__dirname, '../../custom-resource/')), + environment: { + WEB_BUCKET_NAME: websiteBucket.bucketName, + API_ENDPOINT: props.aws_appsync_graphqlEndpoint, + OIDC_PROVIDER: props.aws_oidc_provider, + OIDC_CLIENT_ID: props.aws_oidc_client_id, + OIDC_CUSTOMER_DOMAIN: props.aws_oidc_customer_domain, + CLOUDFRONT_URL: this.websiteURL, + CLOUDFRONT_DISTRIBUTION_ID: website.cloudFrontWebDistribution.distributionId, + AUTHENTICATION_TYPE: props.auth_type === AuthType.OPENID ? 
'OPENID_CONNECT' : 'AMAZON_COGNITO_USER_POOLS', + USER_POOL_ID: props.aws_user_pools_id, + USER_POOL_CLIENT_ID: props.aws_user_pools_web_client_id, + SOLUTION_VERSION: process.env.VERSION || "v1.0.0", + ECS_VPC_ID: props.taskCluster?.ecsVpcId || "", + ECS_CLUSTER_NAME: props.taskCluster?.ecsClusterName || "", + ECS_SUBNETS: props.taskCluster?.ecsSubnets.join(",") || "" + } }) const cfnCustomResourceFn = customResourceFunction.node.defaultChild as lambda.CfnFunction @@ -212,86 +222,25 @@ function handler(event) { } ]) - new s3Deployment.BucketDeployment(this, 'DeployWebsite', { - sources: [s3Deployment.Source.asset(path.join(__dirname, '../../portal/build'))], - destinationBucket: websiteBucket, - // disable this, otherwise the aws-exports.json will be deleted - prune: false, - }) - - this.websiteURL = website.cloudFrontWebDistribution.distributionDomainName - - // CustomResourceConfig - this.createCustomResource('CustomResourceConfig', customResourceFunction, { - properties: [ - { path: 'Region', value: Aws.REGION }, - { - path: 'configItem', value: { - aws_project_region: Aws.REGION, - aws_cognito_region: Aws.REGION, - aws_cloudfront_url: this.websiteURL, - aws_user_pools_id: props.aws_user_pools_id, - aws_user_pools_web_client_id: props.aws_user_pools_web_client_id, - oauth: {}, - aws_oidc_customer_domain: props.aws_oidc_customer_domain, - aws_oidc_provider: props.aws_oidc_provider, - aws_oidc_client_id: props.aws_oidc_client_id, - aws_appsync_graphqlEndpoint: props.aws_appsync_graphqlEndpoint, - aws_appsync_region: Aws.REGION, - aws_appsync_authenticationType: props.auth_type === AuthType.OPENID ? 'OPENID_CONNECT' : 'AMAZON_COGNITO_USER_POOLS', - taskCluster: props.taskCluster - } + const crLambda = new cr.AwsCustomResource(this, "CustomResourceConfig", { + policy: cr.AwsCustomResourcePolicy.fromStatements([ + new iam.PolicyStatement({ + actions: ["lambda:InvokeFunction"], + effect: iam.Effect.ALLOW, + resources: [customResourceFunction.functionArn], + }), + ]), + timeout: Duration.minutes(15), + onUpdate: { + service: "Lambda", + action: "invoke", + parameters: { + FunctionName: customResourceFunction.functionName, + InvocationType: "Event", }, - { path: 'destS3Bucket', value: websiteBucket.bucketName }, - { path: 'destS3key', value: 'aws-exports.json' }, - { path: 'customAction', value: 'putConfigFile' } - ], - dependencies: [cfnCustomResourceRole, cfnCustomResourcePolicy] - }); - - } - - - /** - * Adds dependencies to the AWS CloudFormation resource. - * @param {CfnResource} resource Resource to add AWS CloudFormation dependencies - * @param {CfnResource[]} dependencies Dependencies to be added to the AWS CloudFormation resource - */ - addDependencies(resource: CfnResource, dependencies: CfnResource[]) { - for (let dependency of dependencies) { - resource.addDependsOn(dependency); - } - } - - /** - * Creates custom resource to the AWS CloudFormation template. 
- * @param {string} id Custom resource ID - * @param {lambda.Function} customResourceFunction Custom resource Lambda function - * @param {CustomResourceConfig} config Custom resource configuration - * @return {CfnCustomResource} - */ - createCustomResource(id: string, customResourceFunction: lambda.Function, config?: CustomResourceConfig): CfnCustomResource { - const customResource = new CfnCustomResource(this, id, { - serviceToken: customResourceFunction.functionArn + physicalResourceId: cr.PhysicalResourceId.of(Date.now().toString()), + }, }); - customResource.addOverride('Type', 'Custom::CustomResource'); - - if (config) { - const { properties, condition, dependencies } = config; - - if (properties) { - for (let property of properties) { - customResource.addPropertyOverride(property.path, property.value); - } - } - - if (dependencies) { - this.addDependencies(customResource, dependencies); - } - - customResource.cfnOptions.condition = condition; - } - - return customResource; + crLambda.node.addDependency(customResourceFunction); } } \ No newline at end of file diff --git a/source/constructs/lib/s3-plugin/common-resources.ts b/source/constructs/lib/s3-plugin/common-resources.ts new file mode 100644 index 0000000..1ba990c --- /dev/null +++ b/source/constructs/lib/s3-plugin/common-resources.ts @@ -0,0 +1,244 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Construct } from "constructs"; +import { + Aws, + Duration, + CfnOutput, + RemovalPolicy, + aws_iam as iam, + aws_dynamodb as ddb, + aws_sqs as sqs, + aws_cloudwatch as cw, + aws_cloudwatch_actions as actions, + aws_sns as sns, + aws_sns_subscriptions as sub, + aws_kms as kms, + aws_logs as logs, + aws_s3 as s3 +} from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; + +import { addCfnNagSuppressRules } from "./s3-plugin-stack"; + +export interface CommonProps { + readonly alarmEmail: string; + readonly srcIBucket: s3.IBucket; +} + +export class CommonStack extends Construct { + readonly jobTable: ddb.Table; + readonly sqsQueue: sqs.Queue; + readonly splitPartTable: ddb.Table; + readonly workerLogGroup: logs.ILogGroup; + + constructor(scope: Construct, id: string, props: CommonProps) { + super(scope, id); + + // Setup DynamoDB + this.jobTable = new ddb.Table(this, "S3TransferTable", { + partitionKey: { name: "ObjectKey", type: ddb.AttributeType.STRING }, + billingMode: ddb.BillingMode.PAY_PER_REQUEST, + removalPolicy: RemovalPolicy.DESTROY, + encryption: ddb.TableEncryption.DEFAULT, + pointInTimeRecovery: true + }); + + const cfnJobTable = this.jobTable.node.defaultChild as ddb.CfnTable; + addCfnNagSuppressRules(cfnJobTable, [ + { + id: "W74", + reason: "Use default encryption. Encryption key owned by Amazon" + } + ]); + cfnJobTable.overrideLogicalId("S3TransferTable"); + + this.splitPartTable = new ddb.Table(this, "S3SplitPartTable", { + partitionKey: { name: "UploadId", type: ddb.AttributeType.STRING }, + sortKey: { name: "PartNumber", type: ddb.AttributeType.NUMBER }, + billingMode: ddb.BillingMode.PAY_PER_REQUEST, + removalPolicy: RemovalPolicy.DESTROY, + encryption: ddb.TableEncryption.DEFAULT, + pointInTimeRecovery: true + }); + + const cfnSplitPartTable = this.splitPartTable.node + .defaultChild as ddb.CfnTable; + addCfnNagSuppressRules(cfnSplitPartTable, [ + { + id: "W74", + reason: "Use default encryption. 
Encryption key owned by Amazon" + } + ]); + cfnSplitPartTable.overrideLogicalId("S3SplitPartTable"); + + // Setup SQS + const sqsQueueDLQ = new sqs.Queue(this, "S3TransferQueueDLQ", { + visibilityTimeout: Duration.minutes(30), + retentionPeriod: Duration.days(14), + encryption: sqs.QueueEncryption.KMS_MANAGED + }); + NagSuppressions.addResourceSuppressions(sqsQueueDLQ, [ + { id: "AwsSolutions-SQS3", reason: "it is a DLQ" }, + { id: "AwsSolutions-SQS2", reason: "it is a DLQ" }, + { id: "AwsSolutions-SQS4", reason: "it is a DLQ" } + ]); + + const cfnSqsQueueDLQ = sqsQueueDLQ.node.defaultChild as sqs.CfnQueue; + cfnSqsQueueDLQ.overrideLogicalId("S3TransferQueueDLQ"); + + this.sqsQueue = new sqs.Queue(this, "S3TransferQueue", { + visibilityTimeout: Duration.minutes(15), + retentionPeriod: Duration.days(14), + deadLetterQueue: { + queue: sqsQueueDLQ, + maxReceiveCount: 5 + } + }); + NagSuppressions.addResourceSuppressions(this.sqsQueue, [ + { + id: "AwsSolutions-SQS2", + reason: "this queue is only used by the DTH solution" + }, + { + id: "AwsSolutions-SQS4", + reason: "this queue is only used by the DTH solution" + } + ]); + + this.sqsQueue.addToResourcePolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + conditions: { + ArnLike: { + "aws:SourceArn": props.srcIBucket.bucketArn + } + }, + principals: [new iam.ServicePrincipal("s3.amazonaws.com")], + resources: [this.sqsQueue.queueArn], + actions: [ + "sqs:SendMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl" + ] + }) + ); + + const cfnSqsQueue = this.sqsQueue.node.defaultChild as sqs.CfnQueue; + cfnSqsQueue.overrideLogicalId("S3TransferQueue"); + addCfnNagSuppressRules(cfnSqsQueue, [ + { + id: "W48", + reason: "No need to use encryption" + } + ]); + + // Setup Alarm for queue - DLQ + const alarmDLQ = new cw.Alarm(this, "S3TransferDLQAlarm", { + metric: sqsQueueDLQ.metricApproximateNumberOfMessagesVisible(), + threshold: 0, + comparisonOperator: cw.ComparisonOperator.GREATER_THAN_THRESHOLD, + evaluationPeriods: 1, + datapointsToAlarm: 1 + }); + + const snsKey = new kms.Key(this, "SNSTopicEncryptionKey", { + enableKeyRotation: true, + enabled: true, + alias: `alias/dth/sns/${Aws.STACK_NAME}`, + // policy: snsKeyPolicy, + policy: new iam.PolicyDocument({ + assignSids: true, + statements: [ + new iam.PolicyStatement({ + actions: ["kms:GenerateDataKey*", "kms:Decrypt", "kms:Encrypt"], + resources: ["*"], + effect: iam.Effect.ALLOW, + principals: [ + new iam.ServicePrincipal("sns.amazonaws.com"), + new iam.ServicePrincipal("cloudwatch.amazonaws.com") + ] + }), + // This policy comes from CDK v1; we just moved it here + new iam.PolicyStatement({ + actions: [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion", + "kms:GenerateDataKey", + "kms:TagResource", + "kms:UntagResource" + ], + resources: ["*"], + effect: iam.Effect.ALLOW, + principals: [new iam.AccountRootPrincipal()] + }) + ] + }) + }); + + const alarmTopic = new sns.Topic(this, "S3TransferAlarmTopic", { + masterKey: snsKey, + displayName: `Data Transfer Hub Alarm (${Aws.STACK_NAME})` + }); + + const cfnAlarmTopic = alarmTopic.node.defaultChild as sns.CfnTopic; + cfnAlarmTopic.overrideLogicalId("S3TransferAlarmTopic"); + + alarmTopic.addSubscription(new sub.EmailSubscription(props.alarmEmail)); + alarmDLQ.addAlarmAction(new actions.SnsAction(alarmTopic)); + + // Set up log group for worker asg + this.workerLogGroup = new 
logs.LogGroup(this, "S3RepWorkerLogGroup", { + retention: logs.RetentionDays.TWO_WEEKS + }); + + const cfnWorkerLogGroup = this.workerLogGroup.node.defaultChild as logs.CfnLogGroup; + addCfnNagSuppressRules(cfnWorkerLogGroup, [ + { + id: "W84", + reason: "log group is encrypted with the default master key" + } + ]); + + new CfnOutput(this, "TableName", { + value: this.jobTable.tableName, + description: "DynamoDB Table Name" + }); + + new CfnOutput(this, "SplitPartTableName", { + value: this.splitPartTable.tableName, + description: "Split Part DynamoDB Table Name" + }); + + new CfnOutput(this, "QueueName", { + value: this.sqsQueue.queueName, + description: "Queue Name" + }); + + new CfnOutput(this, "DLQQueueName", { + value: sqsQueueDLQ.queueName, + description: "Dead Letter Queue Name" + }); + + new CfnOutput(this, "AlarmTopicName", { + value: alarmTopic.topicName, + description: "Alarm Topic Name" + }); + + new CfnOutput(this, "StackName", { + value: Aws.STACK_NAME, + description: "Stack Name" + }); + } +} diff --git a/source/constructs/lib/s3-plugin/dashboard-config/cw_agent_config.json b/source/constructs/lib/s3-plugin/dashboard-config/cw_agent_config.json new file mode 100644 index 0000000..a1a3992 --- /dev/null +++ b/source/constructs/lib/s3-plugin/dashboard-config/cw_agent_config.json @@ -0,0 +1,47 @@ +{ + "agent": { + "metrics_collection_interval": 60, + "run_as_user": "root" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/home/ec2-user/worker.log", + "log_group_name": "##log group##", + "log_stream_name": "Instance-{instance_id}" + } + ] + } + } + }, + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "${aws:AutoScalingGroupName}", + "InstanceId": "${aws:InstanceId}" + }, + "aggregation_dimensions": [ + [ + "AutoScalingGroupName" + ] + ], + "metrics_collected": { + "disk": { + "measurement": [ + "used_percent" + ], + "metrics_collection_interval": 60, + "resources": [ + "*" + ] + }, + "mem": { + "measurement": [ + "mem_used_percent" + ], + "metrics_collection_interval": 60 + } + } + } +} \ No newline at end of file diff --git a/source/constructs/lib/s3-plugin/dashboard-stack.ts b/source/constructs/lib/s3-plugin/dashboard-stack.ts new file mode 100644 index 0000000..cc7203f --- /dev/null +++ b/source/constructs/lib/s3-plugin/dashboard-stack.ts @@ -0,0 +1,205 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { + Construct, +} from 'constructs'; +import { + Aws, + Duration, + aws_cloudwatch as cw, + aws_sqs as sqs, +} from 'aws-cdk-lib'; + +import { RunType } from './s3-plugin-stack'; + +export interface DBProps { + readonly runType: RunType, + readonly queue: sqs.Queue + readonly asgName?: string +} + +export class DashboardStack extends Construct { + + readonly dashboard: cw.Dashboard + + constructor(scope: Construct, id: string, props: DBProps) { + super(scope, id); + + const completedBytes = new cw.Metric({ + namespace: `${Aws.STACK_NAME}`, + metricName: 'CompletedBytes', + statistic: 'Sum', + period: Duration.minutes(1), + label: 'Completed(Bytes)' + }) + + const transferredObjects = new cw.Metric({ + namespace: `${Aws.STACK_NAME}`, + metricName: 'TransferredObjects', + statistic: 'Sum', + period: Duration.minutes(1), + label: 'Transferred(Objects)' + }) + + const failedObjects = new cw.Metric({ + namespace: `${Aws.STACK_NAME}`, + metricName: 'FailedObjects', + statistic: 'Sum', + period: Duration.minutes(1), + label: 'Failed(Objects)' + }) + + + const asgDesired = new cw.Metric({ + namespace: 'AWS/AutoScaling', + metricName: 'GroupDesiredCapacity', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Max', + period: Duration.minutes(1), + label: 'Desired Capacity' + }) + + const asgInSvc = new cw.Metric({ + namespace: 'AWS/AutoScaling', + metricName: 'GroupInServiceInstances', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Max', + period: Duration.minutes(1), + label: 'In Service Instances' + }) + + const asgNetworkIn = new cw.Metric({ + namespace: 'AWS/EC2', + metricName: 'NetworkIn', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Sum', + period: Duration.minutes(1) + }) + const asgNetworkOut = new cw.Metric({ + namespace: 'AWS/EC2', + metricName: 'NetworkOut', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Sum', + period: Duration.minutes(1) + }) + + const asgCPU = new cw.Metric({ + namespace: 'AWS/EC2', + metricName: 'CPUUtilization', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Average', + period: Duration.minutes(1), + label: 'CPU %' + }) + + const asgMemory = new cw.Metric({ + namespace: 'CWAgent', + metricName: 'mem_used_percent', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! + }, + statistic: 'Average', + period: Duration.minutes(1), + label: 'MEM %' + }) + + const asgDisk = new cw.Metric({ + namespace: 'CWAgent', + metricName: 'disk_used_percent', + dimensionsMap: { + 'AutoScalingGroupName': props.asgName! 
+ }, + statistic: 'Average', + period: Duration.minutes(1), + label: 'Disk %' + }) + + + + // Main Dashboard + this.dashboard = new cw.Dashboard(this, 'S3Migration', { + dashboardName: `${Aws.STACK_NAME}-Dashboard-${Aws.REGION}` + }); + + this.dashboard.addWidgets( + new cw.GraphWidget({ + title: 'Network', + left: [completedBytes] + }), + + new cw.GraphWidget({ + title: 'Transferred/Failed Objects', + left: [transferredObjects, failedObjects] + }), + + new cw.GraphWidget({ + title: 'Running/Waiting Jobs History', + left: [ + props.queue.metricApproximateNumberOfMessagesVisible({ + period: Duration.minutes(1), + label: 'Waiting Jobs' + }), + props.queue.metricApproximateNumberOfMessagesNotVisible({ + period: Duration.minutes(1), + label: 'Running Jobs' + }) + ] + }), + + + new cw.SingleValueWidget({ + title: 'Running/Waiting Jobs', + metrics: [ + props.queue.metricApproximateNumberOfMessagesVisible({ + period: Duration.minutes(1), + label: 'Waiting Jobs' + }), + props.queue.metricApproximateNumberOfMessagesNotVisible({ + period: Duration.minutes(1), + label: 'Running Jobs' + }) + ], + height: 6 + }) + ) + + + this.dashboard.addWidgets( + new cw.GraphWidget({ + title: 'Network In/Out', + left: [asgNetworkIn, asgNetworkOut] + }), + + new cw.GraphWidget({ + title: 'CPU Utilization (Average)', + left: [asgCPU] + }), + + new cw.GraphWidget({ + title: 'Memory / Disk (Average)', + left: [asgMemory, asgDisk] + }), + + + new cw.GraphWidget({ + title: 'Desired / InService Instances', + left: [asgDesired, asgInSvc] + }), + + + ) + + } + +} \ No newline at end of file diff --git a/source/constructs/lib/s3-plugin/ec2-finder-stack.ts b/source/constructs/lib/s3-plugin/ec2-finder-stack.ts new file mode 100644 index 0000000..98f488f --- /dev/null +++ b/source/constructs/lib/s3-plugin/ec2-finder-stack.ts @@ -0,0 +1,325 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as path from "path"; + +import { Construct } from "constructs"; +import { + Aws, + Duration, + CfnOutput, + CfnMapping, + CfnCondition, + Tags, + Fn, + aws_iam as iam, + aws_ec2 as ec2, + aws_logs as logs, + aws_lambda as lambda, + aws_events as events, + aws_events_targets as targets, + aws_autoscaling as asg +} from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; + +import { addCfnNagSuppressRules } from "./s3-plugin-stack"; + +export interface Env { + [key: string]: any; +} + +export interface Ec2FinderProps { + readonly env: Env; + readonly vpc: ec2.IVpc; + readonly ec2SubnetIds: string[]; + readonly cliRelease: string; + readonly ec2CronExpression: string; + readonly ec2Memory: string; +} + +export class Ec2FinderStack extends Construct { + readonly finderRole: iam.Role; + readonly securityGroup: ec2.SecurityGroup; + + constructor(scope: Construct, id: string, props: Ec2FinderProps) { + super(scope, id); + + const finderLG = new logs.LogGroup(this, "FinderLogGroup", { + retention: logs.RetentionDays.TWO_WEEKS + }); + finderLG.addMetricFilter("Finder-Error-Counts", { + metricName: "FinderErrorCounts", + metricNamespace: `${Aws.STACK_NAME}`, + metricValue: "1", + filterPattern: logs.FilterPattern.anyTerm("Error", "error") + }); + + const cfnfinderLG = finderLG.node.defaultChild as logs.CfnLogGroup; + addCfnNagSuppressRules(cfnfinderLG, [ + { + id: "W84", + reason: "log group is encrypted with the default master key" + } + ]); + + const amznLinux = ec2.MachineImage.latestAmazonLinux2({ + edition: ec2.AmazonLinuxEdition.STANDARD, + storage: ec2.AmazonLinuxStorage.GENERAL_PURPOSE, + cpuType: ec2.AmazonLinuxCpuType.ARM_64 + }); + + this.securityGroup = new ec2.SecurityGroup(this, "S3FinderEC2SG", { + vpc: props.vpc, + description: "Security Group for Data Transfer Hub Finder instance", + allowAllOutbound: true + }); + // For dev only + // this.securityGroup.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'Allow ssh access'); + + const cfnSG = this.securityGroup.node.defaultChild as ec2.CfnSecurityGroup; + addCfnNagSuppressRules(cfnSG, [ + { + id: "W5", + reason: "Open egress rule is required to access public network" + }, + { + id: "W40", + reason: "Open egress rule is required to access public network" + } + ]); + + this.finderRole = new iam.Role(this, "FinderRole", { + assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com") + }); + + const cwAgentPolicy = new iam.Policy(this, "CWAgentPolicy", { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + resources: ["*"], + actions: [ + "cloudwatch:PutMetricData", + "ec2:DescribeVolumes", + "ec2:DescribeTags", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups", + "autoscaling:UpdateAutoScalingGroup" + ] + }) + ] + }); + + const cfnCwAgentPolicy = cwAgentPolicy.node.defaultChild as iam.CfnPolicy; + addCfnNagSuppressRules(cfnCwAgentPolicy, [ + { + id: "W12", + reason: "Publish log streams requires any resources" + } + ]); + + this.finderRole.attachInlinePolicy(cwAgentPolicy); + this.finderRole.addManagedPolicy( + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMManagedInstanceCore") + ); + + const assetTable = new CfnMapping(this, 'AssetTable', { + mapping: { + 'aws': { + assetDomain: 'https://aws-gcr-solutions-assets.s3.amazonaws.com', + }, + 'aws-cn': { + assetDomain: 'https://aws-gcr-solutions-assets.s3.cn-northwest-1.amazonaws.com.cn', + }, + } + }); + + const cliAssetDomain 
= assetTable.findInMap(Aws.PARTITION, 'assetDomain') + + const instanceTypeTable = new CfnMapping(this, "InstanceTypeTable", { + mapping: { + "8": { instanceType: "m6g.large" }, + "16": { instanceType: "r6g.large" }, + "32": { instanceType: "r6g.xlarge" }, + "64": { instanceType: "r6g.2xlarge" }, + "128": { instanceType: "r6g.4xlarge" }, + "256": { instanceType: "r6g.8xlarge" } + } + }); + + const instanceTypeName = instanceTypeTable.findInMap( + props.ec2Memory, + "instanceType" + ); + + const instanceType = new ec2.InstanceType(instanceTypeName); + + const finder_ud = ec2.UserData.forLinux(); + + const finderLaunchTemplate = new ec2.LaunchTemplate( + this, + "FinderEC2LaunchTemplate", + { + instanceType: instanceType, + machineImage: amznLinux, + userData: finder_ud, + role: this.finderRole, + // keyName: 'ad-key', // dev only + securityGroup: this.securityGroup, + blockDevices: [ + { + deviceName: "/dev/xvda", + volume: asg.BlockDeviceVolume.ebs(8, { + encrypted: true + }) + } + ], + associatePublicIpAddress: true, + detailedMonitoring: true, + requireImdsv2: true + } + ); + + const finderAsg = new asg.AutoScalingGroup(this, "S3RepFinderASG", { + autoScalingGroupName: `${Aws.STACK_NAME}-Finder-ASG`, + vpc: props.vpc, + maxCapacity: 1, + minCapacity: 0, + desiredCapacity: 1, + groupMetrics: [ + new asg.GroupMetrics( + asg.GroupMetric.DESIRED_CAPACITY, + asg.GroupMetric.IN_SERVICE_INSTANCES + ) + ], + cooldown: Duration.minutes(2), + signals: asg.Signals.waitForMinCapacity(), + launchTemplate: finderLaunchTemplate + }); + + NagSuppressions.addResourceSuppressions(finderAsg, [ + { + id: "AwsSolutions-AS3", + reason: "we do not need notifications; this ASG scales automatically" + } + ]); + + Tags.of(finderAsg).add("Name", `${Aws.STACK_NAME}-Replication-Finder`, {}); + + finderAsg.userData.addCommands( + "yum update -y", + "cd /home/ec2-user/", + + // Enable BBR + 'echo "net.core.default_qdisc = fq" >> /etc/sysctl.conf', + 'echo "net.ipv4.tcp_congestion_control = bbr" >> /etc/sysctl.conf', + "sysctl -p", + "echo `sysctl net.ipv4.tcp_congestion_control` > finder.log", + + // Enable Cloudwatch Agent + 'echo "{\\"agent\\": {\\"metrics_collection_interval\\": 60,\\"run_as_user\\": \\"root\\"},\\"logs\\": {\\"logs_collected\\": {\\"files\\": {\\"collect_list\\": [{\\"file_path\\": \\"/home/ec2-user/finder.log\\",\\"log_group_name\\": \\"##log group##\\"}]}}}}" >> /home/ec2-user/cw_agent_config.json', + "yum install -y amazon-cloudwatch-agent", + `sed -i -e "s/##log group##/${finderLG.logGroupName}/g" cw_agent_config.json`, + "/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s", + + // Get CLI from solution assets + `curl -LO "${cliAssetDomain}/data-transfer-hub-cli/v${props.cliRelease}/dthcli_${props.cliRelease}_linux_arm64.tar.gz"`, + `tar zxvf dthcli_${props.cliRelease}_linux_arm64.tar.gz`, + + // Prepare the environment variables + `echo "export JOB_TABLE_NAME=${props.env.JOB_TABLE_NAME}" >> env.sh`, + `echo "export JOB_QUEUE_NAME=${props.env.JOB_QUEUE_NAME}" >> env.sh`, + `echo "export SINGLE_PART_TABLE_NAME=${props.env.SINGLE_PART_TABLE_NAME}" >> env.sh`, + `echo "export SFN_ARN=${props.env.SFN_ARN}" >> env.sh`, + + `echo "export SOURCE_TYPE=${props.env.SOURCE_TYPE}" >> env.sh`, + `echo "export SRC_BUCKET=${props.env.SRC_BUCKET}" >> env.sh`, + `echo "export SRC_PREFIX=${props.env.SRC_PREFIX}" >> env.sh`, + `echo "export SRC_PREFIX_LIST=${props.env.SRC_PREFIX_LIST}" >> env.sh`, + `echo "export 
SRC_REGION=${props.env.SRC_REGION}" >> env.sh`, + `echo "export SRC_ENDPOINT=${props.env.SRC_ENDPOINT}" >> env.sh`, + `echo "export SRC_CREDENTIALS=${props.env.SRC_CREDENTIALS}" >> env.sh`, + `echo "export SRC_IN_CURRENT_ACCOUNT=${props.env.SRC_IN_CURRENT_ACCOUNT}" >> env.sh`, + `echo "export PAYER_REQUEST=${props.env.PAYER_REQUEST}" >> env.sh`, + + `echo "export DEST_BUCKET=${props.env.DEST_BUCKET}" >> env.sh`, + `echo "export DEST_PREFIX=${props.env.DEST_PREFIX}" >> env.sh`, + `echo "export DEST_REGION=${props.env.DEST_REGION}" >> env.sh`, + `echo "export DEST_CREDENTIALS=${props.env.DEST_CREDENTIALS}" >> env.sh`, + `echo "export DEST_IN_CURRENT_ACCOUNT=${props.env.DEST_IN_CURRENT_ACCOUNT}" >> env.sh`, + `echo "export DEST_STORAGE_CLASS=${props.env.DEST_STORAGE_CLASS}" >> env.sh`, + `echo "export DEST_ACL=${props.env.DEST_ACL}" >> env.sh`, + + // `echo "export MULTIPART_THRESHOLD=${props.env.MULTIPART_THRESHOLD}" >> env.sh`, + // `echo "export CHUNK_SIZE=${props.env.CHUNK_SIZE}" >> env.sh`, + `echo "export FINDER_DEPTH=${props.env.FINDER_DEPTH}" >> env.sh`, + `echo "export SKIP_COMPARE=${props.env.SKIP_COMPARE}" >> env.sh`, + `echo "export FINDER_NUMBER=${props.env.FINDER_NUMBER}" >> env.sh`, + `echo "export finder_NUMBER=${props.env.finder_NUMBER}" >> env.sh`, + `echo "export INCLUDE_METADATA=${props.env.INCLUDE_METADATA}" >> env.sh`, + `echo "export AWS_DEFAULT_REGION=${Aws.REGION}" >> env.sh`, + + // Create the script + 'echo "source /home/ec2-user/env.sh" >> start-finder.sh', + 'echo "nohup ./dthcli run -t Finder |& tee -a /home/ec2-user/finder.log" >> start-finder.sh', + "echo \"echo 'Exit Finder process, trying to set auto scaling group desiredCapacity to 0 to terminate instance after 60 seconds...' >> /home/ec2-user/finder.log\" >> start-finder.sh", + `echo "sleep 61; mycount=0; while (( \\$mycount < 5 )); do aws autoscaling update-auto-scaling-group --region ${Aws.REGION} --auto-scaling-group-name ${Aws.STACK_NAME}-Finder-ASG --desired-capacity 0; sleep 10; ((mycount=\$mycount+1)); done;" >> start-finder.sh`, // change the asg desired-capacity to 0 to stop the finder task + + "chmod +x start-finder.sh", + // Run the script + "./start-finder.sh" + ); + + const finderLauncherPolicy = new iam.Policy(this, "FinderLauncherPolicy", { + statements: [ + new iam.PolicyStatement({ + actions: [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:UpdateAutoScalingGroup" + ], + resources: ["*"] + }) + ] + }); + + // Create the Finder Launch Lambda + const finderLaunchFn = new lambda.Function(this, "FinderLaunchHelper", { + runtime: lambda.Runtime.PYTHON_3_9, + handler: "lambda_function.lambda_handler", + code: lambda.Code.fromAsset( + path.join(__dirname, "../../lambda/plugin/s3/asg-helper") + ), + memorySize: 256, + timeout: Duration.minutes(15), + environment: { + ASG_NAME: finderAsg.autoScalingGroupName + } + }); + finderLaunchFn.role!.attachInlinePolicy(finderLauncherPolicy); + + // Default schedule CRON event to trigger the Finder launcher per hour + const enableFinderTrigger = new CfnCondition(this, "enableFinderTrigger", { + expression: Fn.conditionEquals(props.ec2CronExpression, "") + }); + + // If props.ec2CronExpression is empty, set CronExpression to 0/60 * * * ? 2000 to stop the schedule trigger + const CronExpression = Fn.conditionIf( + enableFinderTrigger.logicalId, + "0/60 * * * ? 
2000", + props.ec2CronExpression + ); + const trigger = new events.Rule(this, "DTHFinderSchedule", { + schedule: events.Schedule.expression("cron(" + CronExpression + ")") + }); + + // Add target to cloudwatch rule. + trigger.addTarget(new targets.LambdaFunction(finderLaunchFn)); + + new CfnOutput(this, "FinderLogGroupName", { + value: finderLG.logGroupName, + description: "Finder Log Group Name" + }); + } +} diff --git a/source/constructs/lib/s3-plugin/ec2-worker-stack.ts b/source/constructs/lib/s3-plugin/ec2-worker-stack.ts new file mode 100644 index 0000000..0ac18df --- /dev/null +++ b/source/constructs/lib/s3-plugin/ec2-worker-stack.ts @@ -0,0 +1,310 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Construct } from "constructs"; +import { + Aws, + Duration, + CfnOutput, + CfnMapping, + Tags, + aws_iam as iam, + aws_ec2 as ec2, + aws_logs as logs, + aws_cloudwatch as cw, + aws_sqs as sqs, + aws_autoscaling as asg +} from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; + +import * as path from "path"; +import { addCfnNagSuppressRules } from "./s3-plugin-stack"; + +export interface Env { + [key: string]: any; +} + +export interface Ec2WorkerProps { + readonly env: Env; + readonly vpc: ec2.IVpc; + readonly queue: sqs.Queue; + readonly maxCapacity?: number; + readonly minCapacity?: number; + readonly desiredCapacity?: number; + readonly cliRelease: string; + readonly ec2LG: logs.ILogGroup; +} + +/*** + * EC2 Stack + */ +export class Ec2WorkerStack extends Construct { + readonly workerAsg: asg.AutoScalingGroup; + + constructor(scope: Construct, id: string, props: Ec2WorkerProps) { + super(scope, id); + + const instanceType = new ec2.InstanceType("t4g.micro"); + + const amznLinux = ec2.MachineImage.latestAmazonLinux2({ + edition: ec2.AmazonLinuxEdition.STANDARD, + storage: ec2.AmazonLinuxStorage.GENERAL_PURPOSE, + cpuType: ec2.AmazonLinuxCpuType.ARM_64 + }); + + const ec2SG = new ec2.SecurityGroup(this, "S3RepEC2SG", { + vpc: props.vpc, + description: "Security Group for Data Replication Hub EC2 instances", + allowAllOutbound: true + }); + // For dev only + // ec2SG.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'Allow ssh access'); + + const cfnSG = ec2SG.node.defaultChild as ec2.CfnSecurityGroup; + addCfnNagSuppressRules(cfnSG, [ + { + id: "W5", + reason: "Open egress rule is required to access public network" + }, + { + id: "W40", + reason: "Open egress rule is required to access public network" + } + ]); + + const workerAsgRole = new iam.Role(this, "WorkerAsgRole", { + assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com") + }); + + const cwAgentPolicy = new iam.Policy(this, "CWAgentPolicy", { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + resources: ["*"], + actions: [ + "cloudwatch:PutMetricData", + "ec2:DescribeVolumes", + "ec2:DescribeTags", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups" + ] + }) + ] + }); + + const cfnCwAgentPolicy = cwAgentPolicy.node.defaultChild as iam.CfnPolicy; + addCfnNagSuppressRules(cfnCwAgentPolicy, [ + { + id: "W12", + reason: "Publish log streams requires any resources" + } + ]); + + workerAsgRole.attachInlinePolicy(cwAgentPolicy); + workerAsgRole.addManagedPolicy( + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMManagedInstanceCore") + ); + + const worker_ud = ec2.UserData.forLinux(); + + const workerLaunchTemplate = new ec2.LaunchTemplate( + 
this, + "WorkerEC2LaunchTemplate", + { + instanceType: instanceType, + machineImage: amznLinux, + userData: worker_ud, + role: workerAsgRole, + // keyName: 'ad-key', // dev only + securityGroup: ec2SG, + blockDevices: [ + { + deviceName: "/dev/xvda", + volume: asg.BlockDeviceVolume.ebs(8, { + encrypted: true + }) + } + ], + associatePublicIpAddress: true, + detailedMonitoring: true, + requireImdsv2: true + } + ); + + this.workerAsg = new asg.AutoScalingGroup(this, "S3RepWorkerASG", { + autoScalingGroupName: `${Aws.STACK_NAME}-Worker-ASG`, + vpc: props.vpc, + maxCapacity: props.maxCapacity ? props.maxCapacity : 20, + minCapacity: props.minCapacity ? props.minCapacity : 1, + desiredCapacity: props.desiredCapacity ? props.desiredCapacity : 1, + groupMetrics: [ + new asg.GroupMetrics( + asg.GroupMetric.DESIRED_CAPACITY, + asg.GroupMetric.IN_SERVICE_INSTANCES + ) + ], + cooldown: Duration.minutes(2), + signals: asg.Signals.waitForMinCapacity(), + launchTemplate: workerLaunchTemplate + }); + NagSuppressions.addResourceSuppressions(this.workerAsg, [ + { + id: "AwsSolutions-AS3", + reason: "we do not need notifications; this ASG scales automatically" + } + ]); + + Tags.of(this.workerAsg).add( + "Name", + `${Aws.STACK_NAME}-Replication-Worker`, + {} + ); + + const assetTable = new CfnMapping(this, 'AssetTable', { + mapping: { + 'aws': { + assetDomain: 'https://aws-gcr-solutions-assets.s3.amazonaws.com', + }, + 'aws-cn': { + assetDomain: 'https://aws-gcr-solutions-assets.s3.cn-northwest-1.amazonaws.com.cn', + }, + } + }); + + const cliAssetDomain = assetTable.findInMap(Aws.PARTITION, 'assetDomain') + + this.workerAsg.applyCloudFormationInit( + ec2.CloudFormationInit.fromElements( + ec2.InitFile.fromFileInline( + "/home/ec2-user/cw_agent_config.json", + path.join(__dirname, "./dashboard-config/cw_agent_config.json") + ) + ) + ); + + worker_ud.addCommands( + "yum update -y", + "cd /home/ec2-user/", + + // Enable BBR + 'echo "net.core.default_qdisc = fq" >> /etc/sysctl.conf', + 'echo "net.ipv4.tcp_congestion_control = bbr" >> /etc/sysctl.conf', + "sysctl -p", + "echo `sysctl net.ipv4.tcp_congestion_control` > worker.log", + + // Enable Cloudwatch Agent + "yum install -y amazon-cloudwatch-agent", + `sed -i -e "s/##log group##/${props.ec2LG.logGroupName}/g" cw_agent_config.json`, + "/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s", + + // Get CLI from solution assets + `curl -LO "${cliAssetDomain}/data-transfer-hub-cli/v${props.cliRelease}/dthcli_${props.cliRelease}_linux_arm64.tar.gz"`, + `tar zxvf dthcli_${props.cliRelease}_linux_arm64.tar.gz`, + + // Prepare the environment variables + `echo "export JOB_TABLE_NAME=${props.env.JOB_TABLE_NAME}" >> env.sh`, + `echo "export JOB_QUEUE_NAME=${props.env.JOB_QUEUE_NAME}" >> env.sh`, + `echo "export SINGLE_PART_TABLE_NAME=${props.env.SINGLE_PART_TABLE_NAME}" >> env.sh`, + `echo "export SFN_ARN=${props.env.SFN_ARN}" >> env.sh`, + + `echo "export SOURCE_TYPE=${props.env.SOURCE_TYPE}" >> env.sh`, + `echo "export SRC_BUCKET=${props.env.SRC_BUCKET}" >> env.sh`, + `echo "export SRC_PREFIX=${props.env.SRC_PREFIX}" >> env.sh`, + `echo "export SRC_REGION=${props.env.SRC_REGION}" >> env.sh`, + `echo "export SRC_ENDPOINT=${props.env.SRC_ENDPOINT}" >> env.sh`, + `echo "export SRC_CREDENTIALS=${props.env.SRC_CREDENTIALS}" >> env.sh`, + `echo "export SRC_IN_CURRENT_ACCOUNT=${props.env.SRC_IN_CURRENT_ACCOUNT}" >> env.sh`, + `echo "export PAYER_REQUEST=${props.env.PAYER_REQUEST}" >> 
env.sh`, + + `echo "export DEST_BUCKET=${props.env.DEST_BUCKET}" >> env.sh`, + `echo "export DEST_PREFIX=${props.env.DEST_PREFIX}" >> env.sh`, + `echo "export DEST_REGION=${props.env.DEST_REGION}" >> env.sh`, + `echo "export DEST_CREDENTIALS=${props.env.DEST_CREDENTIALS}" >> env.sh`, + `echo "export DEST_IN_CURRENT_ACCOUNT=${props.env.DEST_IN_CURRENT_ACCOUNT}" >> env.sh`, + `echo "export DEST_STORAGE_CLASS=${props.env.DEST_STORAGE_CLASS}" >> env.sh`, + `echo "export DEST_ACL=${props.env.DEST_ACL}" >> env.sh`, + + // `echo "export MULTIPART_THRESHOLD=${props.env.MULTIPART_THRESHOLD}" >> env.sh`, + // `echo "export CHUNK_SIZE=${props.env.CHUNK_SIZE}" >> env.sh`, + `echo "export FINDER_DEPTH=${props.env.FINDER_DEPTH}" >> env.sh`, + `echo "export FINDER_NUMBER=${props.env.FINDER_NUMBER}" >> env.sh`, + `echo "export WORKER_NUMBER=${props.env.WORKER_NUMBER}" >> env.sh`, + `echo "export INCLUDE_METADATA=${props.env.INCLUDE_METADATA}" >> env.sh`, + `echo "export AWS_DEFAULT_REGION=${Aws.REGION}" >> env.sh`, + + // Create the script + 'echo "source /home/ec2-user/env.sh" >> start-worker.sh', + 'echo "nohup ./dthcli run -t Worker |& tee -a /home/ec2-user/worker.log" >> start-worker.sh', + "echo \"echo 'Error occurred, trying to terminate instance...' >> /home/ec2-user/worker.log\" >> start-worker.sh", + 'echo "shutdown" >> start-worker.sh', // shutdown will terminate the instance as the ASG will automatically replace the stopped one + + "chmod +x start-worker.sh", + // Add to startup items + 'echo "@reboot /home/ec2-user/start-worker.sh" >> /var/spool/cron/root', + // Run the script + "./start-worker.sh" + ); + + props.ec2LG.addMetricFilter("CompletedBytes", { + metricName: "CompletedBytes", + metricNamespace: `${Aws.STACK_NAME}`, + metricValue: "$Bytes", + filterPattern: logs.FilterPattern.literal( + '[data, time, p="----->Completed", Bytes, ...]' + ) + }); + + props.ec2LG.addMetricFilter("Transferred-Objects", { + metricName: "TransferredObjects", + metricNamespace: `${Aws.STACK_NAME}`, + metricValue: "1", + filterPattern: logs.FilterPattern.literal( + '[data, time, p="----->Transferred", ..., s="DONE"]' + ) + }); + + props.ec2LG.addMetricFilter("Failed-Objects", { + metricName: "FailedObjects", + metricNamespace: `${Aws.STACK_NAME}`, + metricValue: "1", + filterPattern: logs.FilterPattern.literal( + '[data, time, p="----->Transferred", ..., s="ERROR"]' + ) + }); + + const allMsg = new cw.MathExpression({ + expression: "notvisible + visible", + usingMetrics: { + notvisible: props.queue.metricApproximateNumberOfMessagesNotVisible(), + visible: props.queue.metricApproximateNumberOfMessagesVisible() + }, + period: Duration.minutes(1), + label: "# of messages" + }); + + this.workerAsg.scaleOnMetric("ScaleOutSQS", { + metric: allMsg, + scalingSteps: [ + { upper: 0, change: -10000 }, // Scale in when no messages to process + { lower: 100, change: +1 }, + { lower: 500, change: +2 }, + { lower: 2000, change: +5 }, + { lower: 10000, change: +10 } + ], + adjustmentType: asg.AdjustmentType.CHANGE_IN_CAPACITY + }); + + new CfnOutput(this, "WorkerLogGroupName", { + value: props.ec2LG.logGroupName, + description: "Worker Log Group Name" + }); + + new CfnOutput(this, "WorkerASGName", { + value: this.workerAsg.autoScalingGroupName, + description: "Worker ASG Name" + }); + } +} diff --git a/source/constructs/lib/s3-plugin/multi-part-step-functions.ts b/source/constructs/lib/s3-plugin/multi-part-step-functions.ts new file mode 100644 index 0000000..3fa95e8 --- /dev/null +++ 
b/source/constructs/lib/s3-plugin/multi-part-step-functions.ts @@ -0,0 +1,194 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Construct } from "constructs"; +import { + CfnOutput, + Duration, + Aws, + Fn, + aws_iam as iam, + aws_logs as logs, + aws_dynamodb as ddb, + aws_stepfunctions as sfn, + aws_stepfunctions_tasks as sfnTasks, + aws_lambda as lambda, + aws_s3 as s3 +} from "aws-cdk-lib"; +import * as path from "path"; + +import { addCfnNagSuppressRules } from "../constructs-stack"; + +export interface MultiPartStateMachineProps { + splitPartTable: ddb.Table; + jobTable: ddb.Table; + destIBucket: s3.IBucket; + destPrefix: string; + defaultPolicy: iam.Policy; + workerLogGroup: logs.ILogGroup; + destCredentials: string; + destRegion: string; +} + +export class MultiPartStateMachine extends Construct { + readonly multiPartControllerStateMachine: sfn.StateMachine; + + constructor(scope: Construct, id: string, props: MultiPartStateMachineProps) { + super(scope, id); + + const checkMultiPartUploadStatusFn = new lambda.Function( + this, + "CheckMultiPartUploadStatusFn", + { + runtime: lambda.Runtime.PYTHON_3_9, + code: lambda.AssetCode.fromAsset( + path.join(__dirname, "../../lambda/plugin/s3/multi-part-controller") + ), + handler: "check_multi_part_upload_status.lambda_handler", + environment: { + STACK_NAME: Aws.STACK_NAME, + SOLUTION_VERSION: process.env.VERSION || "v1.0.0", + SPLIT_PART_TABLE_NAME: props.splitPartTable.tableName, + DESTINATION_BUCKET_NAME: props.destIBucket.bucketName, + OBJECT_TRANSFER_TABLE_NAME: props.jobTable.tableName, + WORKER_LOG_GROUP_NAME: props.workerLogGroup.logGroupName, + DESTINATION_PREFIX: props.destPrefix + }, + memorySize: 512, + timeout: Duration.minutes(15), + description: "Data Transfer Hub - Multi-Part Check Job Status Handler" + } + ); + props.splitPartTable.grantReadWriteData(checkMultiPartUploadStatusFn); + checkMultiPartUploadStatusFn.role?.attachInlinePolicy(props.defaultPolicy); + + const multiPartUploadResultFn = new lambda.Function( + this, + "MultiPartUploadResultFn", + { + runtime: lambda.Runtime.PYTHON_3_9, + code: lambda.AssetCode.fromAsset( + path.join(__dirname, "../../lambda/plugin/s3/multi-part-controller") + ), + handler: "multi_part_upload_result.lambda_handler", + environment: { + STACK_NAME: Aws.STACK_NAME, + SOLUTION_VERSION: process.env.VERSION || "v1.0.0", + SPLIT_PART_TABLE_NAME: props.splitPartTable.tableName, + DESTINATION_BUCKET_NAME: props.destIBucket.bucketName, + OBJECT_TRANSFER_TABLE_NAME: props.jobTable.tableName, + WORKER_LOG_GROUP_NAME: props.workerLogGroup.logGroupName, + DEST_CREDENTIALS: props.destCredentials, + DEST_REGION: props.destRegion, + DESTINATION_PREFIX: props.destPrefix + }, + memorySize: 512, + timeout: Duration.minutes(15), + description: "Data Transfer Hub - Multi-Part Upload Result Handler" + } + ); + props.jobTable.grantReadWriteData(multiPartUploadResultFn); + props.splitPartTable.grantReadWriteData(multiPartUploadResultFn); + props.destIBucket.grantReadWrite(multiPartUploadResultFn); + multiPartUploadResultFn.role?.attachInlinePolicy(props.defaultPolicy); + props.workerLogGroup.grantRead(multiPartUploadResultFn); + props.workerLogGroup.grantWrite(multiPartUploadResultFn); + + const checkMultiPartUploadStatusTask = new sfnTasks.LambdaInvoke( + this, + "Check Multi-Part Upload Job Status", + { + lambdaFunction: checkMultiPartUploadStatusFn, + outputPath: "$.Payload" + } + ); + + const multiPartUploadResultTask = new 
sfnTasks.LambdaInvoke( + this, + "Handle Multi-Part Upload Result", + { + lambdaFunction: multiPartUploadResultFn, + outputPath: "$.Payload" + } + ); + + const waitFor1Minute = new sfn.Wait(this, "Wait for 1 minute", { + time: sfn.WaitTime.duration(Duration.minutes(1)) + }); + waitFor1Minute.next(checkMultiPartUploadStatusTask); + + const multiPartUploadComplete = new sfn.Succeed( + this, + "Multi-Part Upload Complete" + ); + + const multiPartUploadFailed = new sfn.Fail( + this, + "Multi-Part Upload Failed" + ); + + const completeMultiPartUploadStatusChoice = new sfn.Choice( + this, + "Complete or Abort Multi-Part Upload Job Status Choice" + ) + .when( + sfn.Condition.stringEquals("$.status", "COMPLETED"), + multiPartUploadComplete + ) + .otherwise(multiPartUploadFailed); + + multiPartUploadResultTask.next(completeMultiPartUploadStatusChoice); + + const checkMultiPartUploadStatusChoice = new sfn.Choice( + this, + "Check Multi-Part Upload Job Status Choice" + ) + .when( + sfn.Condition.stringEquals("$.status", "ERROR"), + multiPartUploadResultTask + ) + .when( + sfn.Condition.stringEquals("$.status", "COMPLETED"), + multiPartUploadResultTask + ) + .otherwise(waitFor1Minute); + + const definition = checkMultiPartUploadStatusTask.next( + checkMultiPartUploadStatusChoice + ); + + // State machine log group + const logGroup = new logs.LogGroup(this, "ErrorLogGroup", { + logGroupName: `/aws/vendedlogs/states/${Fn.select( + 6, + Fn.split(":", checkMultiPartUploadStatusFn.functionArn) + )}-MultiPart-Controller` + }); + const cfnLogGroup = logGroup.node.defaultChild as logs.CfnLogGroup; + addCfnNagSuppressRules(cfnLogGroup, [ + { + id: "W84", + reason: "log group is encrypted with the default master key" + } + ]); + + this.multiPartControllerStateMachine = new sfn.StateMachine( + this, + "multiPartControllerStateMachine", + { + stateMachineName: `${Aws.STACK_NAME}-MultiPart-ControllerSM`, + definitionBody: sfn.DefinitionBody.fromChainable(definition), + logs: { + destination: logGroup, + level: sfn.LogLevel.ALL + }, + tracingEnabled: true + } + ); + + new CfnOutput(this, "SfnArn", { + value: this.multiPartControllerStateMachine.stateMachineArn, + description: "SFN ARN" + }); + } +} diff --git a/source/constructs/lib/s3-plugin/s3-plugin-stack.ts b/source/constructs/lib/s3-plugin/s3-plugin-stack.ts new file mode 100644 index 0000000..b6e20c6 --- /dev/null +++ b/source/constructs/lib/s3-plugin/s3-plugin-stack.ts @@ -0,0 +1,678 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
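+// Overview (added note): this stack wires the CloudFormation parameters, the EC2 finder and worker sub-stacks, the multipart-controller state machine, the CloudWatch dashboard and the S3 notification helper into a single deployable template.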
+// SPDX-License-Identifier: Apache-2.0 + +import { Construct } from "constructs"; +import { + Aws, + Fn, + CfnParameter, + CfnResource, + Stack, + StackProps, + Duration, + CustomResource, + aws_secretsmanager as sm, + aws_s3 as s3, + aws_ec2 as ec2, + aws_iam as iam, + custom_resources as cr, + aws_lambda as lambda +} from "aws-cdk-lib"; +import { NagSuppressions } from "cdk-nag"; + +import { CommonStack, CommonProps } from "./common-resources"; +import { Ec2FinderStack, Ec2FinderProps } from "./ec2-finder-stack"; +import { Ec2WorkerStack, Ec2WorkerProps } from "./ec2-worker-stack"; +import { DashboardStack, DBProps } from "./dashboard-stack"; + +import * as path from "path"; +import { MultiPartStateMachine } from "./multi-part-step-functions"; + +const { VERSION } = process.env; + +export const enum RunType { + EC2 = "EC2", + LAMBDA = "Lambda" +} + +/** + * cfn-nag suppression rule interface + */ +interface CfnNagSuppressRule { + readonly id: string; + readonly reason: string; +} + +export function addCfnNagSuppressRules( + resource: CfnResource, + rules: CfnNagSuppressRule[] +) { + resource.addMetadata("cfn_nag", { + rules_to_suppress: rules + }); +} + +/*** + * Main Stack + */ +export class DataTransferS3Stack extends Stack { + private paramGroups: any[] = []; + private paramLabels: any = {}; + + private addToParamGroups(label: string, ...param: string[]) { + this.paramGroups.push({ + Label: { default: label }, + Parameters: param + }); + } + + private addToParamLabels(label: string, param: string) { + this.paramLabels[param] = { + default: label + }; + } + + constructor(scope: Construct, id: string, props?: StackProps) { + super(scope, id, props); + + const runType: RunType = this.node.tryGetContext("runType") || RunType.EC2; + + const cliRelease = "1.3.0"; + + const srcType = new CfnParameter(this, "srcType", { + description: + "Choose type of source storage, including Amazon S3, Aliyun OSS, Qiniu Kodo, Tencent COS or Google GCS", + type: "String", + default: "Amazon_S3", + allowedValues: ["Amazon_S3", "Aliyun_OSS", "Qiniu_Kodo", "Tencent_COS"] + }); + this.addToParamLabels("Source Type", srcType.logicalId); + + const srcBucket = new CfnParameter(this, "srcBucket", { + description: "Source Bucket Name", + type: "String" + }); + this.addToParamLabels("Source Bucket", srcBucket.logicalId); + + const srcPrefix = new CfnParameter(this, "srcPrefix", { + description: "Source Prefix (Optional)", + default: "", + type: "String" + }); + this.addToParamLabels("Source Prefix", srcPrefix.logicalId); + + const srcPrefixsListFile = new CfnParameter(this, "srcPrefixsListFile", { + description: + "Source prefix list file S3 path (Optional). Supports txt files with up to 10 million lines, e.g. my_prefix_list.txt", + default: "", + type: "String" + }); + this.addToParamLabels( + "Source Prefix List File", + srcPrefixsListFile.logicalId + ); + + const srcSkipCompare = new CfnParameter(this, "srcSkipCompare", { + description: + "Skip the data comparison in task finding process? 
If yes, all data in the source will be sent to the destination", + default: "false", + type: "String", + allowedValues: ["true", "false"] + }); + this.addToParamLabels("Skip Data Comparison", srcSkipCompare.logicalId); + + const srcRegion = new CfnParameter(this, "srcRegion", { + description: "Source Region Name", + default: "", + type: "String" + }); + this.addToParamLabels("Source Region", srcRegion.logicalId); + + const srcEndpoint = new CfnParameter(this, "srcEndpoint", { + description: + "Source Endpoint URL (Optional), leave blank unless you want to provide a custom Endpoint URL", + default: "", + type: "String" + }); + this.addToParamLabels("Source Endpoint URL", srcEndpoint.logicalId); + + const srcInCurrentAccount = new CfnParameter(this, "srcInCurrentAccount", { + description: + "Source Bucket in current account? If not, you should provide a credential with read access", + default: "false", + type: "String", + allowedValues: ["true", "false"] + }); + this.addToParamLabels( + "Source In Current Account", + srcInCurrentAccount.logicalId + ); + + const srcCredentials = new CfnParameter(this, "srcCredentials", { + description: + "The secret name in Secrets Manager used to keep AK/SK credentials for Source Bucket. Leave blank if source bucket is in current account or source is open data", + default: "", + type: "String" + }); + this.addToParamLabels("Source Credentials", srcCredentials.logicalId); + + const isPayerRequest = new CfnParameter(this, "isPayerRequest", { + description: "Enable Payer Request?", + default: "false", + type: "String", + allowedValues: ["true", "false"] + }); + this.addToParamLabels("Enable Payer Request", isPayerRequest.logicalId); + + const destBucket = new CfnParameter(this, "destBucket", { + description: "Destination Bucket Name", + type: "String" + }); + this.addToParamLabels("Destination Bucket", destBucket.logicalId); + + const destPrefix = new CfnParameter(this, "destPrefix", { + description: "Destination Prefix (Optional)", + default: "", + type: "String" + }); + this.addToParamLabels("Destination Prefix", destPrefix.logicalId); + + const destRegion = new CfnParameter(this, "destRegion", { + description: "Destination Region Name", + default: "", + type: "String" + }); + this.addToParamLabels("Destination Region", destRegion.logicalId); + + const destInCurrentAccount = new CfnParameter( + this, + "destInCurrentAccount", + { + description: + "Destination Bucket in current account? If not, you should provide a credential with read and write access", + default: "true", + type: "String", + allowedValues: ["true", "false"] + } + ); + this.addToParamLabels( + "Destination In Current Account", + destInCurrentAccount.logicalId + ); + + const destCredentials = new CfnParameter(this, "destCredentials", { + description: + "The secret name in Secrets Manager used to keep AK/SK credentials for Destination Bucket. 
Leave blank if destination bucket is in current account", + default: "", + type: "String" + }); + this.addToParamLabels("Destination Credentials", destCredentials.logicalId); + + // 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER'|'DEEP_ARCHIVE'|'OUTPOSTS', + const destStorageClass = new CfnParameter(this, "destStorageClass", { + description: "Destination Storage Class, defaults to INTELLIGENT_TIERING", + default: "INTELLIGENT_TIERING", + type: "String", + allowedValues: [ + "STANDARD", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING" + ] + }); + this.addToParamLabels( + "Destination Storage Class", + destStorageClass.logicalId + ); + + const destAcl = new CfnParameter(this, "destAcl", { + description: "Destination Access Control List", + default: "bucket-owner-full-control", + type: "String", + allowedValues: [ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control" + ] + }); + this.addToParamLabels("Destination Access Control List", destAcl.logicalId); + + const ec2VpcId = new CfnParameter(this, "ec2VpcId", { + description: "VPC ID to run EC2 task, e.g. vpc-bef13dc7", + default: "", + type: "AWS::EC2::VPC::Id" + }); + this.addToParamLabels("VPC ID", ec2VpcId.logicalId); + + const ec2Subnets = new CfnParameter(this, "ec2Subnets", { + description: + "Subnet IDs to run EC2 task. Please provide at least two subnets, delimited by comma, e.g. subnet-97bfc4cd,subnet-7ad7de32", + default: "", + type: "List<AWS::EC2::Subnet::Id>" + }); + this.addToParamLabels("Subnet IDs", ec2Subnets.logicalId); + + const finderEc2Memory = new CfnParameter(this, "finderEc2Memory", { + description: "The amount of memory (in GB) used by the Finder task.", + default: "8", + type: "String", + allowedValues: ["8", "16", "32", "64", "128", "256"] + }); + this.addToParamLabels("EC2 Finder Memory", finderEc2Memory.logicalId); + + const ec2CronExpression = new CfnParameter(this, "ec2CronExpression", { + description: + "Cron Expression For EC2 Finder Task. Leave blank to execute only once.", + default: "0/60 * * * ? *", + type: "String" + }); + this.addToParamLabels("EC2 Cron Expression", ec2CronExpression.logicalId); + + const alarmEmail = new CfnParameter(this, "alarmEmail", { + allowedPattern: + "\\w[-\\w.+]*@([A-Za-z0-9][-A-Za-z0-9]+\\.)+[A-Za-z]{2,14}", + type: "String", + description: "Errors will be sent to this email." + }); + this.addToParamLabels("Alarm Email", alarmEmail.logicalId); + + const includeMetadata = new CfnParameter(this, "includeMetadata", { + description: + "Add replication of object metadata; there will be additional API calls", + default: "true", + type: "String", + allowedValues: ["true", "false"] + }); + + this.addToParamLabels("Include Metadata", includeMetadata.logicalId); + + const srcEvent = new CfnParameter(this, "srcEvent", { + description: + "Whether to enable S3 Event to trigger the replication. Note that S3 Event is only applicable if the source is in the current account", + default: "No", + type: "String", + allowedValues: ["No", "Create", "CreateAndDelete"] + }); + this.addToParamLabels("Enable S3 Event", srcEvent.logicalId); + + const finderDepth = new CfnParameter(this, "finderDepth", { + description: + "The depth of sub folders to compare in parallel. 
0 means comparing all objects in sequence", + default: "0", + type: "String" + }); + const finderNumber = new CfnParameter(this, "finderNumber", { + description: "The number of finder threads to run in parallel", + default: "1", + type: "String" + }); + const workerNumber = new CfnParameter(this, "workerNumber", { + description: + "The number of worker threads to run in one worker node/instance", + default: "4", + type: "String" + }); + + this.addToParamGroups( + "Source Information", + srcType.logicalId, + srcBucket.logicalId, + srcPrefix.logicalId, + srcPrefixsListFile.logicalId, + srcRegion.logicalId, + srcEndpoint.logicalId, + srcInCurrentAccount.logicalId, + srcCredentials.logicalId, + srcEvent.logicalId, + srcSkipCompare.logicalId, + isPayerRequest.logicalId + ); + this.addToParamGroups( + "Destination Information", + destBucket.logicalId, + destPrefix.logicalId, + destRegion.logicalId, + destInCurrentAccount.logicalId, + destCredentials.logicalId, + destStorageClass.logicalId, + destAcl.logicalId + ); + this.addToParamGroups("Notification Information", alarmEmail.logicalId); + this.addToParamGroups( + "EC2 Cluster Information", + ec2VpcId.logicalId, + ec2Subnets.logicalId, + finderEc2Memory.logicalId, + ec2CronExpression.logicalId + ); + + // let lambdaMemory: CfnParameter | undefined + let maxCapacity: CfnParameter | undefined; + let minCapacity: CfnParameter | undefined; + let desiredCapacity: CfnParameter | undefined; + + if (runType === RunType.EC2) { + maxCapacity = new CfnParameter(this, "maxCapacity", { + description: "Maximum Capacity for Auto Scaling Group", + default: "20", + type: "Number" + }); + this.addToParamLabels("Maximum Capacity", maxCapacity.logicalId); + + minCapacity = new CfnParameter(this, "minCapacity", { + description: "Minimum Capacity for Auto Scaling Group", + default: "1", + type: "Number" + }); + this.addToParamLabels("Minimum Capacity", minCapacity.logicalId); + + desiredCapacity = new CfnParameter(this, "desiredCapacity", { + description: "Desired Capacity for Auto Scaling Group", + default: "1", + type: "Number" + }); + this.addToParamLabels("Desired Capacity", desiredCapacity.logicalId); + + this.addToParamGroups( + "Advanced Options", + finderDepth.logicalId, + finderNumber.logicalId, + workerNumber.logicalId, + includeMetadata.logicalId, + maxCapacity.logicalId, + minCapacity.logicalId, + desiredCapacity.logicalId + ); + } + + this.templateOptions.description = `(SO8002) - Data Transfer Hub - S3 Plugin - Template version ${VERSION}`; + + this.templateOptions.metadata = { + "AWS::CloudFormation::Interface": { + ParameterGroups: this.paramGroups, + ParameterLabels: this.paramLabels + } + }; + + // Get Secret for credentials from Secrets Manager + const srcCred = sm.Secret.fromSecretNameV2( + this, + "SrcCredentialsParam", + srcCredentials.valueAsString + ); + const destCred = sm.Secret.fromSecretNameV2( + this, + "DestCredentialsParam", + destCredentials.valueAsString + ); + + const srcIBucket = s3.Bucket.fromBucketName( + this, + `SrcBucket`, + srcBucket.valueAsString + ); + const destIBucket = s3.Bucket.fromBucketName( + this, + `DestBucket`, + destBucket.valueAsString + ); + + // Get VPC + const vpc = ec2.Vpc.fromVpcAttributes(this, "EC2Vpc", { + vpcId: ec2VpcId.valueAsString, + availabilityZones: Fn.getAzs(), + publicSubnetIds: ec2Subnets.valueAsList + }); + + // Start Common Stack + const commonProps: CommonProps = { + alarmEmail: alarmEmail.valueAsString, + srcIBucket: srcIBucket + }; + + const commonStack = new CommonStack(this, "Common", 
commonProps); + + const defaultPolicy = new iam.Policy(this, "DefaultPolicy"); + + defaultPolicy.addStatements( + new iam.PolicyStatement({ + actions: [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:ConditionCheckItem", + "dynamodb:BatchWriteItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem" + ], + resources: [ + commonStack.jobTable.tableArn, + commonStack.splitPartTable.tableArn + ] + }), + new iam.PolicyStatement({ + actions: [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + resources: [ + `${srcCred.secretArn}-??????`, + `${destCred.secretArn}-??????` + ] + }) + ); + + const multipartStateMachine = new MultiPartStateMachine( + this, + "MultiPartStateMachine", + { + splitPartTable: commonStack.splitPartTable, + jobTable: commonStack.jobTable, + destIBucket: destIBucket, + destPrefix: destPrefix.valueAsString, + workerLogGroup: commonStack.workerLogGroup, + destCredentials: destCredentials.valueAsString, + destRegion: destRegion.valueAsString, + defaultPolicy: defaultPolicy + } + ); + + // Start Finder - EC2 Stack + const finderEnv = { + AWS_DEFAULT_REGION: Aws.REGION, + JOB_TABLE_NAME: commonStack.jobTable.tableName, + JOB_QUEUE_NAME: commonStack.sqsQueue.queueName, + SINGLE_PART_TABLE_NAME: commonStack.splitPartTable.tableName, + SFN_ARN: + multipartStateMachine.multiPartControllerStateMachine.stateMachineArn, + + SOURCE_TYPE: srcType.valueAsString, + SRC_BUCKET: srcBucket.valueAsString, + SRC_PREFIX: srcPrefix.valueAsString, + SRC_PREFIX_LIST: srcPrefixsListFile.valueAsString, + SRC_REGION: srcRegion.valueAsString, + SRC_ENDPOINT: srcEndpoint.valueAsString, + SRC_CREDENTIALS: srcCredentials.valueAsString, + SRC_IN_CURRENT_ACCOUNT: srcInCurrentAccount.valueAsString, + PAYER_REQUEST: isPayerRequest.valueAsString, + SKIP_COMPARE: srcSkipCompare.valueAsString, + + DEST_BUCKET: destBucket.valueAsString, + DEST_PREFIX: destPrefix.valueAsString, + DEST_REGION: destRegion.valueAsString, + DEST_CREDENTIALS: destCredentials.valueAsString, + DEST_IN_CURRENT_ACCOUNT: destInCurrentAccount.valueAsString, + + FINDER_DEPTH: finderDepth.valueAsString, + FINDER_NUMBER: finderNumber.valueAsString + }; + + const finderProps: Ec2FinderProps = { + env: finderEnv, + vpc: vpc, + ec2SubnetIds: ec2Subnets.valueAsList, + cliRelease: cliRelease, + ec2CronExpression: ec2CronExpression.valueAsString, + ec2Memory: finderEc2Memory.valueAsString + }; + const finderStack = new Ec2FinderStack(this, "FinderStack", finderProps); + finderStack.finderRole.attachInlinePolicy(defaultPolicy); + commonStack.sqsQueue.grantSendMessages(finderStack.finderRole); + srcIBucket.grantRead(finderStack.finderRole); + destIBucket.grantRead(finderStack.finderRole); + multipartStateMachine.multiPartControllerStateMachine.grantRead(finderStack.finderRole); + + const workerEnv = { + JOB_TABLE_NAME: commonStack.jobTable.tableName, + JOB_QUEUE_NAME: commonStack.sqsQueue.queueName, + SINGLE_PART_TABLE_NAME: commonStack.splitPartTable.tableName, + SFN_ARN: + multipartStateMachine.multiPartControllerStateMachine.stateMachineArn, + SOURCE_TYPE: srcType.valueAsString, + + SRC_BUCKET: srcBucket.valueAsString, + SRC_PREFIX: srcPrefix.valueAsString, + SRC_PREFIX_LIST: srcPrefixsListFile.valueAsString, + SRC_REGION: srcRegion.valueAsString, + SRC_ENDPOINT: srcEndpoint.valueAsString, + SRC_CREDENTIALS: srcCredentials.valueAsString, + SRC_IN_CURRENT_ACCOUNT: srcInCurrentAccount.valueAsString, 
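+      // Note (added): workerEnv mirrors finderEnv above plus worker-only settings (storage class, ACL, worker number); the user-data script in ec2-worker-stack.ts writes each entry into env.sh on every worker instance.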
+ PAYER_REQUEST: isPayerRequest.valueAsString, + + DEST_BUCKET: destBucket.valueAsString, + DEST_PREFIX: destPrefix.valueAsString, + DEST_REGION: destRegion.valueAsString, + DEST_CREDENTIALS: destCredentials.valueAsString, + DEST_IN_CURRENT_ACCOUNT: destInCurrentAccount.valueAsString, + DEST_STORAGE_CLASS: destStorageClass.valueAsString, + DEST_ACL: destAcl.valueAsString, + + FINDER_DEPTH: finderDepth.valueAsString, + FINDER_NUMBER: finderNumber.valueAsString, + WORKER_NUMBER: workerNumber.valueAsString, + INCLUDE_METADATA: includeMetadata.valueAsString + }; + + let asgName = undefined; + if (runType === RunType.EC2) { + const ec2Props: Ec2WorkerProps = { + env: workerEnv, + vpc: vpc, + queue: commonStack.sqsQueue, + maxCapacity: maxCapacity?.valueAsNumber, + minCapacity: minCapacity?.valueAsNumber, + desiredCapacity: desiredCapacity?.valueAsNumber, + ec2LG: commonStack.workerLogGroup, + cliRelease: cliRelease + }; + + const ec2Stack = new Ec2WorkerStack(this, "EC2WorkerStack", ec2Props); + + ec2Stack.workerAsg.role.attachInlinePolicy(defaultPolicy); + commonStack.sqsQueue.grantConsumeMessages(ec2Stack.workerAsg.role); + commonStack.sqsQueue.grantSendMessages(ec2Stack.workerAsg.role); + srcIBucket.grantRead(ec2Stack.workerAsg.role); + destIBucket.grantReadWrite(ec2Stack.workerAsg.role); + multipartStateMachine.multiPartControllerStateMachine.grantStartExecution(ec2Stack.workerAsg.role); + + asgName = ec2Stack.workerAsg.autoScalingGroupName; + } + + // Set up CloudWatch Dashboard + const dbProps: DBProps = { + runType: runType, + queue: commonStack.sqsQueue, + asgName: asgName + }; + new DashboardStack(this, "DashboardStack", dbProps); + + commonStack.sqsQueue.addToResourcePolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + conditions: { + StringEquals: { + "aws:SourceArn": srcIBucket.bucketArn + } + }, + principals: [new iam.ServicePrincipal("s3.amazonaws.com")], + resources: [commonStack.sqsQueue.queueArn], + actions: ["sqs:SendMessage"] + }) + ); + + // The notification custom resource below is always created; the srcEvent + // parameter (passed to the helper Lambda as EVENT_ACTION) determines whether + // and how events on the source bucket are wired to the transfer queue. 
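+  // Added note: the helper Lambda is invoked through the CDK custom resource provider framework on stack create/update/delete, and uses the s3:GetBucketNotification / s3:PutBucketNotification permissions granted below to read and rewrite the bucket's notification configuration.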
+ const s3NotificationHelperFn = new lambda.Function( + this, + "s3NotificationHelperFn", + { + description: `${Aws.STACK_NAME} - Create S3 Notification Processor`, + runtime: lambda.Runtime.PYTHON_3_9, + handler: "lambda_function.lambda_handler", + code: lambda.Code.fromAsset( + path.join(__dirname, "../../lambda/plugin/s3/custom-resource") + ), + memorySize: 256, + timeout: Duration.seconds(60), + environment: { + STACK_NAME: Aws.STACK_NAME, + SOLUTION_VERSION: process.env.VERSION || "v1.0.0", + BUCKET_NAME: srcIBucket.bucketName, + OBJECT_PREFIX: srcPrefix.valueAsString, + EVENT_QUEUE_NAME: commonStack.sqsQueue.queueName, + EVENT_QUEUE_ARN: commonStack.sqsQueue.queueArn, + EVENT_ACTION: srcEvent.valueAsString + } + } + ); + // Allow the Lambda to read and update the notification configuration of the source bucket + s3NotificationHelperFn.addToRolePolicy( + new iam.PolicyStatement({ + actions: ["s3:GetBucketNotification", "s3:PutBucketNotification"], + effect: iam.Effect.ALLOW, + resources: [ + `arn:${Aws.PARTITION}:s3:::${srcIBucket.bucketName}`, + `arn:${Aws.PARTITION}:s3:::${srcIBucket.bucketName}/*` + ] + }) + ); + + const s3NotificationHelperProvider = new cr.Provider( + this, + "s3NotificationHelperProvider", + { + onEventHandler: s3NotificationHelperFn + } + ); + + s3NotificationHelperProvider.node.addDependency(s3NotificationHelperFn); + NagSuppressions.addResourceSuppressions(s3NotificationHelperProvider, [ + { + id: "AwsSolutions-L1", + reason: "the lambda runtime is determined by the aws cdk custom resource provider" + } + ]); + + const s3NotificationHelperLambdaTrigger = new CustomResource( + this, + "s3NotificationHelperLambdaTrigger", + { + serviceToken: s3NotificationHelperProvider.serviceToken + } + ); + + s3NotificationHelperLambdaTrigger.node.addDependency( + s3NotificationHelperProvider + ); + } +} diff --git a/source/constructs/lib/task-cluster.ts b/source/constructs/lib/task-cluster.ts index 957abed..365633f 100644 --- a/source/constructs/lib/task-cluster.ts +++ b/source/constructs/lib/task-cluster.ts @@ -30,7 +30,7 @@ export class TaskCluster extends Construct { super(scope, id); const vpc = new ec2.Vpc(this, 'TaskVPC', { - cidr: props?.cidr || '10.0.0.0/16', + ipAddresses: ec2.IpAddresses.cidr(props?.cidr || '10.0.0.0/16'), enableDnsHostnames: true, enableDnsSupport: true, subnetConfiguration: [ diff --git a/source/constructs/package.json b/source/constructs/package.json index 490e0fc..a55ecc3 100755 --- a/source/constructs/package.json +++ b/source/constructs/package.json @@ -1,7 +1,8 @@ { "name": "data-transfer-hub", - "version": "2.4.0", + "version": "2.5.0", "license": "Apache-2.0", + "description": "data-transfer-hub construct", "author": { "name": "Amazon Web Services", "url": "https://aws.amazon.com/solutions" }, @@ -19,27 +20,25 @@ }, "devDependencies": { "@types/aws-lambda": "^8.10.83", - "@types/jest": "29.4.0", - "@types/node": "16.10.3", - "@types/uuid": "^8.3.1", - "aws-cdk": "2.74.0", - "aws-cdk-lib": "2.74.0", - "aws-sdk": "2.1360.0", - "aws-sdk-mock": "^5.4.0", - "jest": "^29.4.3", - "ts-jest": "^29.0.5", + "@types/jest": "29.5.3", + "@types/node": "20.6.0", + "@types/uuid": "9.0.2", + "aws-cdk": "2.95.0", + "aws-cdk-lib": "2.95.0", + "jest": "^29.7.0", + "ts-jest": "^29.1.1", "ts-node": "^10.2.1", - "typescript": "4.9.3", - "uuid": "^8.3.2" + "typescript": "5.2.2", + "uuid": "9.0.0" }, "dependencies": { - "@aws-solutions-constructs/aws-cloudfront-s3": "2.38.0", - "@aws-solutions-constructs/core": "2.38.0", - 
"@aws-cdk/aws-appsync-alpha": "2.58.0-alpha.0", - "aws-cdk": "2.74.0", - "aws-cdk-lib": "2.74.0", - "cdk-nag": "2.16.0", - "constructs": "10.1.85", - "source-map-support": "0.5.20" + "@aws-solutions-constructs/aws-cloudfront-s3": "2.44.0", + "@aws-solutions-constructs/core": "2.44.0", + "@aws-cdk/aws-appsync-alpha": "2.59.0-alpha.0", + "aws-cdk": "2.95.0", + "aws-cdk-lib": "2.95.0", + "cdk-nag": "2.27.66", + "constructs": "10.2.55", + "source-map-support": "0.5.21" } } diff --git a/source/constructs/run-all-tests.sh b/source/constructs/run-all-tests.sh index cea4fd7..a77436f 100755 --- a/source/constructs/run-all-tests.sh +++ b/source/constructs/run-all-tests.sh @@ -137,7 +137,14 @@ run_python_test $construct_dir/lambda/api/cwl cloudwatch_api run_python_test $construct_dir/lambda/api/secrets_manager secrets_manager_api run_python_test $construct_dir/lambda/api/task task_api run_python_test $construct_dir/lambda/api/task-monitoring task_monitoring_api +run_python_test $construct_dir/lambda/cdk cdk_api run_python_test $construct_dir/lambda/custom-resource custom-resource +run_python_test $construct_dir/lambda/plugin/s3/asg-helper s3-plugin-asg-helper +run_python_test $construct_dir/lambda/plugin/s3/custom-resource s3-plugin-custom-resource +run_python_test $construct_dir/lambda/plugin/s3/multi-part-controller s3-plugin-multi-part-controller +run_python_test $construct_dir/lambda/plugin/ecr/ecr_helper ecr_helper +run_python_test $construct_dir/lambda/plugin/ecr/sfn_helper sfn_helper +run_python_test $construct_dir/../custom-resource cloudfront_cfn # Return to the source/ level cd $source_dir \ No newline at end of file diff --git a/source/constructs/test/api-task.test.ts b/source/constructs/test/api-task.test.ts deleted file mode 100644 index 51e137b..0000000 --- a/source/constructs/test/api-task.test.ts +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -import * as task from '../lambda/api/api-task' -import { TaskType, ScheduleType } from '../lambda/common' -import * as AWSMock from 'aws-sdk-mock' -import * as AWS from 'aws-sdk' -import { StartExecutionOutput } from "aws-sdk/clients/stepfunctions" -import { DocumentClient } from "aws-sdk/clients/dynamodb"; - -beforeAll(async () => { - process.env.AWS_REGION = 'us-west-2' - process.env.STATE_MACHINE_ARN = 'arn:aws:states:us-west-2:12345678901:state-machine-name' - process.env.TASK_TABLE = 'TaskTable' - process.env.PLUGIN_TEMPLATE_S3EC2 = 'http://dth.s3.amazonaws.com/S3EC2.template' - process.env.PLUGIN_TEMPLATE_ECR = 'http://dth.s3.amazonaws.com/ECR.template' - AWSMock.setSDKInstance(AWS) - // done() -}) - -test('createTask', async () => { - const executionArn = 'arn:aws:states:us-west-2:12345678901:execution:state-machine-name:3a35b25f-05a6-4ba9-a0e3-1312166e85f3' - AWSMock.mock('StepFunctions', 'startExecution', ({ }, callback: Function) => { - const output: StartExecutionOutput = { - executionArn: executionArn, - startDate: new Date() - } - callback(null, output) - }) - - AWSMock.mock('DynamoDB.DocumentClient', 'put', (params: DocumentClient.PutItemInput, callback: Function) => { - const output: DocumentClient.PutItemOutput = { - Attributes: params.Item - } - callback(null, output) - }) - - const createTaskInput: task.AppSyncEvent = { - info: { - fieldName: 'createTask', - parentTypeName: 'Mutation', - variables: {} - }, - arguments: { - input: { - type: TaskType.S3, - description: "Test", - scheduleType: ScheduleType.ONE_TIME, - parameters: [ - { - ParameterKey: "srcBucket", - ParameterValue: "test-src" - }, - { - ParameterKey: "destBucket", - ParameterValue: "test-dest" - } - ] - } - } - } - - const createTaskOutput = await task.handler(createTaskInput) - console.log("createTaskOutput"); - console.log(createTaskOutput); - - expect(createTaskOutput).toHaveProperty('id') - expect(createTaskOutput).toHaveProperty('createdAt') - expect(createTaskOutput).toHaveProperty('executionArn', executionArn) - expect(createTaskOutput).toHaveProperty('type', 'S3EC2') - expect(createTaskOutput).toHaveProperty('parameters', [ - { - ParameterKey: "srcBucket", - ParameterValue: "test-src" - }, - { - ParameterKey: "destBucket", - ParameterValue: "test-dest" - } - ]) - - AWSMock.restore('StepFunctions') - AWSMock.restore('DynamoDB.DocumentClient') - -}); - -test('stopTask', async () => { - const taskId = 'this-is-an-id' - - AWSMock.mock('DynamoDB.DocumentClient', 'query', ({ }, callback: Function) => { - const output: DocumentClient.QueryOutput = { - Items: [ - { - id: taskId, - stackId: 'stack-id' - } - ] - } - callback(null, output) - }) - - const executionArn = 'arn:aws:states:us-west-2:12345678901:execution:state-machine-name:3a35b25f-05a6-4ba9-a0e3-1312166e85f3' - AWSMock.mock('StepFunctions', 'startExecution', ({ }, callback: Function) => { - const output: StartExecutionOutput = { - executionArn: executionArn, - startDate: new Date() - } - callback(null, output) - }) - - AWSMock.mock('DynamoDB.DocumentClient', 'update', ({ }, callback: Function) => { - const output: DocumentClient.UpdateItemOutput = { - Attributes: { - progress: 'STOPPING', - id: taskId, - executionArn: executionArn - } - } - callback(null, output) - }) - - - const stopTaskInput: task.AppSyncEvent = { - info: { - fieldName: 'stopTask', - parentTypeName: 'Mutation', - variables: {} - }, - arguments: { - id: taskId - } - } - - const stopTaskRes = await task.handler(stopTaskInput) - - 
expect(stopTaskRes).toBeTruthy() - // @ts-ignore - expect(stopTaskRes.id).toEqual(taskId) - // @ts-ignore - expect(stopTaskRes.executionArn).toEqual(executionArn) - // @ts-ignore - expect(stopTaskRes.progress).toEqual('STOPPING') - - AWSMock.restore() - -}) diff --git a/source/constructs/test/common.test.ts b/source/constructs/test/common.test.ts deleted file mode 100644 index ab73dcd..0000000 --- a/source/constructs/test/common.test.ts +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { makeid } from "../lambda/common"; - -test('makeid function', () => { - const id = makeid(10) - expect(id.search('([A-Z]|[a-z]){10}')).toEqual(0) -}) \ No newline at end of file diff --git a/source/constructs/test/ecr-plugin.test.ts b/source/constructs/test/ecr-plugin.test.ts new file mode 100644 index 0000000..b089087 --- /dev/null +++ b/source/constructs/test/ecr-plugin.test.ts @@ -0,0 +1,22 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { App } from "aws-cdk-lib"; +import { Template } from "aws-cdk-lib/assertions"; +import * as main from "../lib/ecr-plugin/ecr-plugin-stack"; + +beforeEach(() => { + jest.resetModules(); + process.env = {}; +}); + +describe("MainStack", () => { + test("Test main stack with default setting", () => { + const app = new App(); + + const stack = new main.DataTransferECRStack(app, "MyTestStack"); + const template = Template.fromStack(stack); + + template.resourceCountIs("AWS::StepFunctions::StateMachine", 1); + }); +}); diff --git a/source/constructs/test/s3-plugin.test.ts b/source/constructs/test/s3-plugin.test.ts new file mode 100644 index 0000000..d094422 --- /dev/null +++ b/source/constructs/test/s3-plugin.test.ts @@ -0,0 +1,27 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + + +import { App } from "aws-cdk-lib"; +import { Template } from "aws-cdk-lib/assertions"; +import * as main from '../lib/s3-plugin/s3-plugin-stack'; + +beforeEach(() => { + jest.resetModules(); + process.env = {}; +}); + +describe("MainStack", () => { + test("Test main stack with default setting", () => { + const app = new App(); + + // WHEN + const stack = new main.DataTransferS3Stack(app, "MyTestStack"); + const template = Template.fromStack(stack); + + template.hasResourceProperties("AWS::DynamoDB::Table", {}); + + template.resourceCountIs("AWS::SQS::Queue", 2); + }); + +}); \ No newline at end of file diff --git a/source/custom-resource/.coveragerc b/source/custom-resource/.coveragerc new file mode 100644 index 0000000..bacf8eb --- /dev/null +++ b/source/custom-resource/.coveragerc @@ -0,0 +1,8 @@ +[run] +omit = + tests/* + .venv-*/* + test/* + */__init__.py +source = + . \ No newline at end of file diff --git a/source/custom-resource/index.js b/source/custom-resource/index.js deleted file mode 100644 index 52e92c9..0000000 --- a/source/custom-resource/index.js +++ /dev/null @@ -1,288 +0,0 @@ -/********************************************************************************************************************* - * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * * - * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * - * with the License. 
A copy of the License is located at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * - * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * - * and limitations under the License. * - *********************************************************************************************************************/ - -'use strict'; - -console.log('Loading function'); - -const https = require('https'); -const url = require('url'); -const moment = require('moment'); -const S3Helper = require('./lib/s3-helper.js'); -const UsageMetrics = require('./lib/usage-metrics'); -const uuidv4 = require('uuid/v4'); - -/** - * Request handler. - */ -exports.handler = (event, context, callback) => { - console.log('Received event:', JSON.stringify(event, null, 2)); - - let responseStatus = 'FAILED'; - let responseData = {}; - - if (event.RequestType === 'Delete') { - if (event.ResourceProperties.customAction === 'sendMetric') { - responseStatus = 'SUCCESS'; - - if (event.ResourceProperties.anonymousData === 'Yes') { - let _metric = { - Solution: event.ResourceProperties.solutionId, - UUID: event.ResourceProperties.UUID, - TimeStamp: moment().utc().format('YYYY-MM-DD HH:mm:ss.S'), - Data: { - Version: event.ResourceProperties.version, - Deleted: moment().utc().format() - } - }; - - let _usageMetrics = new UsageMetrics(); - _usageMetrics.sendAnonymousMetric(_metric).then((data) => { - console.log(data); - console.log('Annonymous metrics successfully sent.'); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: 'Sending anonymous delete metric failed' - }; - console.log([responseData.Error, ':\n', err].join('')); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - } - - if (event.RequestType === 'Create') { - if (event.ResourceProperties.customAction === 'putConfigFile') { - let _s3Helper = new S3Helper(); - console.log(event.ResourceProperties.configItem); - _s3Helper.putConfigFile(event.ResourceProperties.configItem, event.ResourceProperties.destS3Bucket, event.ResourceProperties.destS3key).then((data) => { - responseStatus = 'SUCCESS'; - responseData = data; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Saving config file to ${event.ResourceProperties.destS3Bucket}/${event.ResourceProperties.destS3key} failed` - }; - console.log([responseData.Error, ':\n', err].join('')); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - - } else if (event.ResourceProperties.customAction === 'copyS3assets') { - let _s3Helper = new S3Helper(); - - _s3Helper.copyAssets(event.ResourceProperties.manifestKey, - event.ResourceProperties.sourceS3Bucket, event.ResourceProperties.sourceS3key, - event.ResourceProperties.destS3Bucket).then((data) => { - responseStatus = 'SUCCESS'; - responseData = {}; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Copy of website assets failed` - }; - console.log([responseData.Error, ':\n', err].join('')); - 
sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - - } else if (event.ResourceProperties.customAction === 'createUuid') { - responseStatus = 'SUCCESS'; - responseData = { - UUID: uuidv4() - }; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - - } else if (event.ResourceProperties.customAction === 'checkSourceBuckets') { - let _s3Helper = new S3Helper(); - - _s3Helper.validateBuckets(event.ResourceProperties.sourceBuckets).then((data) => { - responseStatus = 'SUCCESS'; - responseData = {}; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Could not find the following source bucket(s) in your account: ${err}. Please specify at least one source bucket that exists within your account and try again. If specifying multiple source buckets, please ensure that they are comma-separated.` - }; - console.log(responseData.Error); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData, responseData.Error); - }); - - } else if (event.ResourceProperties.customAction === 'sendMetric') { - if (event.ResourceProperties.anonymousData === 'Yes') { - let _metric = { - Solution: event.ResourceProperties.solutionId, - UUID: event.ResourceProperties.UUID, - TimeStamp: moment().utc().format('YYYY-MM-DD HH:mm:ss.S'), - Data: { - Version: event.ResourceProperties.version, - Launch: moment().utc().format() - } - }; - - let _usageMetrics = new UsageMetrics(); - _usageMetrics.sendAnonymousMetric(_metric).then((data) => { - console.log(data); - console.log('Annonymous metrics successfully sent.'); - }).catch((err) => { - console.log(`Sending anonymous launch metric failed: ${err}`); - }); - - sendResponse(event, callback, context.logStreamName, 'SUCCESS', {}); - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - } - - if (event.RequestType === 'Update') { - if (event.ResourceProperties.customAction === 'copyS3assets') { - let _s3Helper = new S3Helper(); - - _s3Helper.copyAssets(event.ResourceProperties.manifestKey, - event.ResourceProperties.sourceS3Bucket, event.ResourceProperties.sourceS3key, - event.ResourceProperties.destS3Bucket).then((data) => { - responseStatus = 'SUCCESS'; - responseData = {}; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Copy of website assets failed` - }; - console.log([responseData.Error, ':\n', err].join('')); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - - } else if (event.ResourceProperties.customAction === 'putConfigFile') { - let _s3Helper = new S3Helper(); - console.log(event.ResourceProperties.configItem); - _s3Helper.putConfigFile(event.ResourceProperties.configItem, event.ResourceProperties.destS3Bucket, event.ResourceProperties.destS3key).then((data) => { - responseStatus = 'SUCCESS'; - responseData = data; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Saving config file to ${event.ResourceProperties.destS3Bucket}/${event.ResourceProperties.destS3key} failed` - }; - console.log([responseData.Error, ':\n', err].join('')); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - - } else if 
(event.ResourceProperties.customAction === 'checkSourceBuckets') { - let _s3Helper = new S3Helper(); - - _s3Helper.validateBuckets(event.ResourceProperties.sourceBuckets).then((data) => { - responseStatus = 'SUCCESS'; - responseData = {}; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: `Could not find the following source bucket(s) in your account: ${err}. Please specify at least one source bucket that exists within your account and try again. If specifying multiple source buckets, please ensure that they are comma-separated.` - }; - console.log(responseData.Error); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData, responseData.Error); - }); - - } else if (event.ResourceProperties.customAction === 'createUuid') { - responseStatus = 'SUCCESS'; - responseData = { - UUID: uuidv4() - }; - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - } else if (event.ResourceProperties.customAction === 'sendMetric') { - responseStatus = 'SUCCESS'; - - if (event.ResourceProperties.anonymousData === 'Yes') { - let _metric = { - Solution: event.ResourceProperties.solutionId, - UUID: event.ResourceProperties.UUID, - TimeStamp: moment().utc().format('YYYY-MM-DD HH:mm:ss.S'), - Data: { - Version: event.ResourceProperties.version, - Updated: moment().utc().format() - } - }; - - let _usageMetrics = new UsageMetrics(); - _usageMetrics.sendAnonymousMetric(_metric).then((data) => { - console.log(data); - console.log('Annonymous metrics successfully sent.'); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }).catch((err) => { - responseData = { - Error: 'Sending anonymous delete metric failed' - }; - console.log([responseData.Error, ':\n', err].join('')); - sendResponse(event, callback, context.logStreamName, responseStatus, responseData); - }); - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - } else { - sendResponse(event, callback, context.logStreamName, 'SUCCESS'); - } - } -}; - -/** - * Sends a response to the pre-signed S3 URL - */ -let sendResponse = function(event, callback, logStreamName, responseStatus, responseData, customReason) { - - const defaultReason = `See the details in CloudWatch Log Stream: ${logStreamName}`; - const reason = (customReason !== undefined) ? 
customReason : defaultReason; - - const responseBody = JSON.stringify({ - Status: responseStatus, - Reason: reason, - PhysicalResourceId: event.LogicalResourceId, - StackId: event.StackId, - RequestId: event.RequestId, - LogicalResourceId: event.LogicalResourceId, - Data: responseData, - }); - - console.log('RESPONSE BODY:\n', responseBody); - const parsedUrl = url.parse(event.ResponseURL); - const options = { - hostname: parsedUrl.hostname, - port: 443, - path: parsedUrl.path, - method: 'PUT', - headers: { - 'Content-Type': '', - 'Content-Length': responseBody.length, - } - }; - - const req = https.request(options, (res) => { - console.log('STATUS:', res.statusCode); - console.log('HEADERS:', JSON.stringify(res.headers)); - callback(null, 'Successfully sent stack response!'); - }); - - req.on('error', (err) => { - console.log('sendResponse Error:\n', err); - callback(err); - }); - - req.write(responseBody); - req.end(); -}; \ No newline at end of file diff --git a/source/custom-resource/lambda_function.py b/source/custom-resource/lambda_function.py new file mode 100644 index 0000000..36f6870 --- /dev/null +++ b/source/custom-resource/lambda_function.py @@ -0,0 +1,109 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import logging +import os +from uuid import uuid4 + +import boto3 +from botocore import config +from boto3.dynamodb.conditions import Attr + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +solution_version = os.environ.get("SOLUTION_VERSION") + +solution_id = os.environ.get("SOLUTION_ID", "SO8001") +user_agent_config = { + "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}" +} +default_config = config.Config(**user_agent_config) +default_region = os.environ.get("AWS_REGION") + +s3 = boto3.resource("s3", region_name=default_region, config=default_config) + +bucket_name = os.environ.get("WEB_BUCKET_NAME") +api_endpoint = os.environ.get("API_ENDPOINT") +user_pool_id = os.environ.get("USER_POOL_ID") +user_pool_client_id = os.environ.get("USER_POOL_CLIENT_ID") +oidc_provider = os.environ.get("OIDC_PROVIDER") +client_id = os.environ.get("OIDC_CLIENT_ID") +authentication_type = os.environ.get("AUTHENTICATION_TYPE") +custom_domain = os.environ.get("OIDC_CUSTOMER_DOMAIN", "") +if custom_domain and not custom_domain.startswith("https://"): + custom_domain = "https://" + custom_domain +cloudfront_url = os.environ.get("CLOUDFRONT_URL") +region = os.environ.get("AWS_REGION") +ecs_vpc_id = os.environ.get("ECS_VPC_ID") +ecs_cluster_name = os.environ.get("ECS_CLUSTER_NAME") +ecs_subnets = os.environ.get("ECS_SUBNETS") + +CLOUDFRONT_DISTRIBUTION_ID = os.environ.get("CLOUDFRONT_DISTRIBUTION_ID") + +cloudfront = boto3.client("cloudfront") +iam = boto3.client("iam") + + +def lambda_handler(event, _): + logger.info(event) + + config_str = get_config_str() + write_to_s3(config_str) + + upgrade_data() + + cloudfront_invalidate(CLOUDFRONT_DISTRIBUTION_ID, ["/*"]) + + return "OK" + + +def upgrade_data(): + """Perform actions on updating backend data during upgrade""" + pass + + +def get_config_str(): + """ Get config string """ + export_json = { + "taskCluster": { + "ecsVpcId": ecs_vpc_id, + "ecsClusterName":ecs_cluster_name, + "ecsSubnets": ecs_subnets.split(",") + }, + "aws_appsync_region": region, + "aws_appsync_authenticationType": authentication_type, + "aws_user_pools_id": user_pool_id, + "aws_oidc_customer_domain": custom_domain, + "aws_project_region": region, + "aws_oidc_provider": 
oidc_provider, + "aws_cognito_region": region, + "aws_oidc_client_id": client_id, + "aws_cloudfront_url": cloudfront_url, + "aws_appsync_graphqlEndpoint": api_endpoint, + "aws_user_pools_web_client_id": user_pool_client_id + } + + return json.dumps(export_json) + + +def write_to_s3(config_str): + """ Write config file to S3 """ + logger.info("Put config file to S3") + key_name = "aws-exports.json" + s3.Bucket(bucket_name).put_object(Key=key_name, Body=config_str) + logger.info("Put config file to S3 completed.") + + +def cloudfront_invalidate(distribution_id, distribution_paths): + """ Create a CloudFront invalidation request """ + invalidation_resp = cloudfront.create_invalidation( + DistributionId=distribution_id, + InvalidationBatch={ + "Paths": {"Quantity": len(distribution_paths), "Items": distribution_paths}, + "CallerReference": str(uuid4()), + }, + ) + + return invalidation_resp["Invalidation"]["Id"] diff --git a/source/custom-resource/lib/s3-helper.js b/source/custom-resource/lib/s3-helper.js deleted file mode 100644 index c61fc94..0000000 --- a/source/custom-resource/lib/s3-helper.js +++ /dev/null @@ -1,277 +0,0 @@ -/********************************************************************************************************************* - * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * * - * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * - * with the License. A copy of the License is located at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * - * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * - * and limitations under the License. * - *********************************************************************************************************************/ - -/** - * @author Solution Builders - */ - -'use strict'; - -let AWS = require('aws-sdk'); -const fs = require('fs'); - -/** - * Helper function to interact with AWS S3 for cfn custom resource. - * - * @class s3Helper - */ -class s3Helper { - - /** - * @class s3Helper - * @constructor - */ - constructor() { - this.creds = new AWS.EnvironmentCredentials('AWS'); // Lambda provided credentials - this.downloadLocation = '/tmp/manifest.json'; - } - - /** - * validateBuckets - * Cross-checks provided bucket names against existing bucket names in the account for - * validation. - * @param {String} strBuckets - String of bucket names from the template params. - */ - async validateBuckets(strBuckets) { - const formatted = strBuckets.replace(/\s/g,''); - console.log(`Attempting to check if the following buckets exist: ${formatted}`); - const buckets = formatted.split(','); - const errorBuckets = []; - for (let i = 0; i < buckets.length; i++) { - const s3 = new AWS.S3({ signatureVersion: 'v4' }); - const params = { Bucket: buckets[i] }; - try { - await s3.headBucket(params).promise(); - console.log(`Found bucket: ${buckets[i]}`); - } catch (err) { - console.log(`Could not find bucket: ${buckets[i]}`); - console.log(err); - errorBuckets.push(buckets[i]); - } - } - if (errorBuckets.length === 0) return Promise.resolve(); - else return Promise.reject(errorBuckets); - } - - /** - * putConfigFile - * Saves a JS config file to S3 location. - * @param {JSON} content - JSON object. - * @param {JSON} destS3Bucket - S3 destination bucket. 
- * @param {JSON} destS3key - S3 destination key. - */ - putConfigFile(content, destS3Bucket, destS3key) { - console.log(`Attempting to save content blob destination location: ${destS3Bucket}/${destS3key}`); - console.log(JSON.stringify(content)); - - return new Promise((resolve, reject) => { - let _content = JSON.stringify(content, null, 2) - _content += '\n' - - let params = { - Bucket: destS3Bucket, - Key: destS3key, - Body: _content, - Metadata: { - 'Content-Type': 'application/json' - } - }; - - let s3 = new AWS.S3({ - signatureVersion: 'v4' - }); - s3.putObject(params, function(err, data) { - if (err) { - console.log(err); - reject(`Error creating ${destS3Bucket}/${destS3key} content \n${err}`); - } else { - console.log(data); - resolve(data); - } - }); - }); - } - - copyAssets(manifestKey, sourceS3Bucket, sourceS3prefix, destS3Bucket) { - console.log(`source bucket: ${sourceS3Bucket}`); - console.log(`source prefix: ${sourceS3prefix}`); - console.log(`destination bucket: ${destS3Bucket}`); - - let _self = this; - return new Promise((resolve, reject) => { - - this._downloadManifest(sourceS3Bucket, manifestKey).then((data) => { - - fs.readFile(_self.downloadLocation, 'utf8', function(err, data) { - if (err) { - console.log(err); - reject(err); - } - - let _manifest = _self._validateJSON(data); - - if (!_manifest) { - reject('Unable to validate downloaded manifest file JSON'); - } else { - _self._uploadFile(_manifest.files, 0, destS3Bucket, `${sourceS3Bucket}/${sourceS3prefix}`).then((resp) => { - console.log(resp); - resolve(resp) - }).catch((err) => { - console.log(err); - reject(err); - }); - } - - }); - }).catch((err) => { - console.log(err); - reject(err); - }); - - }); - }; - - /** - * Helper function to validate the JSON structure of contents of an import manifest file. - * @param {string} body - JSON object stringify-ed. - * @returns {JSON} - The JSON parsed string or null if string parsing failed - */ - _validateJSON(body) { - try { - let data = JSON.parse(body); - console.log(data); - return data; - } catch (e) { - // failed to parse - console.log('Manifest file contains invalid JSON.'); - return null; - } - }; - - _uploadFile(filelist, index, destS3Bucket, sourceS3prefix) { - let _self = this; - return new Promise((resolve, reject) => { - - if (filelist.length > index) { - let params = { - Bucket: destS3Bucket, - Key: filelist[index], - CopySource: [sourceS3prefix, filelist[index]].join('/'), - MetadataDirective: 'REPLACE' - }; - - params.ContentType = this._setContentType(filelist[index]); - params.Metadata = { - 'Content-Type': params.ContentType - }; - console.log(params); - let s3 = new AWS.S3({ - signatureVersion: 'v4' - }); - s3.copyObject(params, function(err, data) { - if (err) { - console.log(err); - reject(`error copying ${sourceS3prefix}/${filelist[index]}\n${err}`); - } else { - console.log(`${sourceS3prefix}/${filelist[index]} uploaded successfully`); - let _next = index + 1; - _self._uploadFile(filelist, _next, destS3Bucket, sourceS3prefix).then((resp) => { - resolve(resp); - }).catch((err2) => { - reject(err2); - }); - } - }); - } else { - resolve(`${index} files copied`); - } - - }); - - } - - /** - * Helper function to download a manifest to local storage for processing. - * @param {string} s3Bucket - Amazon S3 bucket of the manifest to download. - * @param {string} s3Key - Amazon S3 key of the manifest to download. - * @param {string} downloadLocation - Local storage location to download the Amazon S3 object. 
- */ - _downloadManifest(s3Bucket, s3Key) { - let _self = this; - return new Promise((resolve, reject) => { - - let params = { - Bucket: s3Bucket, - Key: s3Key - }; - - console.log(`Attempting to download manifest: ${JSON.stringify(params)}`); - - // check to see if the manifest file exists - let s3 = new AWS.S3({ - signatureVersion: 'v4' - }); - s3.headObject(params, function(err, metadata) { - if (err) { - console.log(err); - } - - if (err && err.code === 'NotFound') { - // Handle no object on cloud here - console.log('manifest file doesn\'t exist'); - reject('Manifest file was not found.'); - } else { - console.log('manifest file exists'); - console.log(metadata); - let file = require('fs').createWriteStream(_self.downloadLocation); - - s3.getObject(params). - on('httpData', function(chunk) { - file.write(chunk); - }). - on('httpDone', function() { - file.end(); - console.log('manifest downloaded for processing...'); - resolve('success'); - }). - send(); - } - }); - }); - } - - _setContentType(file) { - let _contentType = 'binary/octet-stream'; - if (file.endsWith('.html')) { - _contentType = 'text/html'; - } else if (file.endsWith('.css')) { - _contentType = 'text/css'; - } else if (file.endsWith('.png')) { - _contentType = 'image/png'; - } else if (file.endsWith('.svg')) { - _contentType = 'image/svg+xml'; - } else if (file.endsWith('.jpg')) { - _contentType = 'image/jpeg'; - } else if (file.endsWith('.js')) { - _contentType = 'application/javascript'; - } else if (file.endsWith('.json')) { - _contentType = 'application/json'; - } - - return _contentType; - } - - -} - -module.exports = s3Helper; \ No newline at end of file diff --git a/source/custom-resource/lib/usage-metrics/package.json b/source/custom-resource/lib/usage-metrics/package.json deleted file mode 100644 index 77fdd58..0000000 --- a/source/custom-resource/lib/usage-metrics/package.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "name": "usage-metrics", - "version": "0.0.1", - "description": "Usage metrics is an auxiliary class to capture metrics pertinent for feedback on the solution", - "author": { - "name": "aws-solutions-builder" - }, - "private": true, - "main": "metrics.common.js", - "dependencies": { - }, - "devDependencies": { - "aws-sdk": "*", - "chai": "*", - "sinon": "*", - "sinon-chai": "*", - "mocha": "*", - "aws-sdk-mock": "*", - "npm-run-all": "*" - }, - "scripts": { - "test": "mocha *.spec.js" - } -} \ No newline at end of file diff --git a/source/custom-resource/package.json b/source/custom-resource/package.json deleted file mode 100644 index d495e64..0000000 --- a/source/custom-resource/package.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "name": "ui-framework-helper", - - "description": "UI Framework custom resource helper Lambda function", - "main": "index.js", - "version": "2.4.0", - "license": "Apache-2.0", - "author": { - "name": "Amazon Web Services", - "url": "https://aws.amazon.com/solutions" - }, - "private": true, - "dependencies": { - "moment": "^2.24.0", - "uuid": "^3.3.3", - "usage-metrics": "file:lib/usage-metrics/" - }, - "devDependencies": { - "aws-sdk": "*", - "aws-sdk-mock": "*", - "chai": "*", - "mocha": "^8.1.1", - "npm-run-all": "*", - "sinon": "*", - "sinon-chai": "*" - }, - "scripts": { - "pretest": "npm install", - "test": "mocha lib/*.spec.js", - "build:init": "rm -rf dist && rm -rf node_modules", - "build:zip": "zip -rq custom-resource.zip .", - "build:dist": "mkdir dist && mv custom-resource.zip dist/", - "build": "npm run build:init && npm install --production && npm run build:zip && npm 
run build:dist" - }, - "bundledDependencies": [ - "moment", - "uuid" - ] -} diff --git a/source/custom-resource/test/__init__.py b/source/custom-resource/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/source/custom-resource/test/conftest.py b/source/custom-resource/test/conftest.py new file mode 100644 index 0000000..805050d --- /dev/null +++ b/source/custom-resource/test/conftest.py @@ -0,0 +1,30 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os + +import pytest + + +@pytest.fixture(autouse=True) +def default_environment_variables(): + """Mocked AWS environment variables such as AWS credentials and region""" + os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id" + os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key" + os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token" + os.environ["AWS_REGION"] = "us-east-1" + os.environ["AWS_DEFAULT_REGION"] = "us-east-1" + + os.environ["WEB_BUCKET_NAME"] = "solution-web-bucket" + os.environ["API_ENDPOINT"] = "https://solution.xxx.amazonaws.com/graphql" + os.environ["USER_POOL_ID"] = "abc" + os.environ["USER_POOL_CLIENT_ID"] = "abcd" + os.environ["OIDC_PROVIDER"] = "" + os.environ["OIDC_CLIENT_ID"] = "" + os.environ["OIDC_CUSTOMER_DOMAIN"] = "" + os.environ["AUTHENTICATION_TYPE"] = "AMAZON_COGNITO_USER_POOLS" + os.environ["CLOUDFRONT_URL"] = "solution.cloudfront.net" + os.environ["DEFAULT_LOGGING_BUCKET"] = "solution-bucket" + os.environ["ECS_VPC_ID"] = "solution-bucket" + os.environ["ECS_CLUSTER_NAME"] = "solution-bucket" + os.environ["ECS_SUBNETS"] = "subnet-1,subnet-2" \ No newline at end of file diff --git a/source/custom-resource/test/requirements-test.txt b/source/custom-resource/test/requirements-test.txt new file mode 100644 index 0000000..4b34868 --- /dev/null +++ b/source/custom-resource/test/requirements-test.txt @@ -0,0 +1,6 @@ +moto +pytest +pytest-cov +pyyaml +openapi_spec_validator +docker \ No newline at end of file diff --git a/source/custom-resource/test/test_lambda_function.py b/source/custom-resource/test/test_lambda_function.py new file mode 100644 index 0000000..d084e79 --- /dev/null +++ b/source/custom-resource/test/test_lambda_function.py @@ -0,0 +1,111 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0 + +import pytest +import os +import boto3 +import json +from moto import mock_s3, mock_dynamodb, mock_cloudfront, mock_iam + + +@pytest.fixture +def s3_client(): + + with mock_s3(): + region = os.environ.get("AWS_REGION") + + s3 = boto3.resource("s3", region_name=region) + # Create the buckets + default_bucket = os.environ.get("WEB_BUCKET_NAME") + s3.create_bucket(Bucket=default_bucket) + yield + + +@pytest.fixture +def iam_client(): + + with mock_iam(): + region = os.environ.get("AWS_REGION") + + iam = boto3.client("iam", region_name=region) + policy_json = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Resource": "arn:aws:logs:us-east-1:123456789012:*", + "Effect": "Allow", + } + ], + } + response = iam.create_policy( + PolicyName="mock-central-assume-role-policy", + PolicyDocument=json.dumps(policy_json), + ) + os.environ["CENTRAL_ASSUME_ROLE_POLICY_ARN"] = response["Policy"]["Arn"] + yield + + +@pytest.fixture +def cloudfront_client(): + + with mock_cloudfront(): + region = os.environ.get("AWS_REGION") + + cloudfront = boto3.client("cloudfront", region_name=region) + + response = cloudfront.create_distribution( + DistributionConfig=dict( + CallerReference="firstOne", + Aliases=dict(Quantity=1, Items=["mydomain.com"]), + DefaultRootObject="index.html", + Comment="Test distribution", + Enabled=True, + Origins=dict( + Quantity=1, + Items=[ + dict( + Id="1", + DomainName="mydomain.com.s3.amazonaws.com", + S3OriginConfig=dict(OriginAccessIdentity=""), + ) + ], + ), + DefaultCacheBehavior=dict( + TargetOriginId="1", + ViewerProtocolPolicy="redirect-to-https", + TrustedSigners=dict(Quantity=0, Enabled=False), + ForwardedValues=dict( + Cookies={"Forward": "all"}, + Headers=dict(Quantity=0), + QueryString=False, + QueryStringCacheKeys=dict(Quantity=0), + ), + MinTTL=1000, + ), + ) + ) + os.environ["CLOUDFRONT_DISTRIBUTION_ID"] = response["Distribution"]["Id"] + yield + + +def test_lambda_function(s3_client, cloudfront_client, iam_client): + import lambda_function + + result = lambda_function.lambda_handler(None, None) + # Expect execution to succeed.
+ assert result == "OK" + + region = os.environ.get("AWS_REGION") + s3 = boto3.resource("s3", region_name=region) + + default_bucket = os.environ.get("WEB_BUCKET_NAME") + + # Expect Config file is uploaded to S3 + obj = s3.Object(default_bucket, "aws-exports.json").get() + assert "ContentLength" in obj + assert obj["ContentLength"] > 0 \ No newline at end of file diff --git a/source/portal/.eslintrc.json b/source/portal/.eslintrc.json index 48879fa..1655ab7 100644 --- a/source/portal/.eslintrc.json +++ b/source/portal/.eslintrc.json @@ -6,8 +6,7 @@ "extends": [ "eslint:recommended", "plugin:react/recommended", - "plugin:@typescript-eslint/recommended", - "plugin:prettier/recommended" + "plugin:@typescript-eslint/recommended" ], "parser": "@typescript-eslint/parser", "parserOptions": { diff --git a/source/portal/package.json b/source/portal/package.json index 7cf5e0c..48951fd 100644 --- a/source/portal/package.json +++ b/source/portal/package.json @@ -1,6 +1,7 @@ { "name": "data-transfer-hub-portal", - "version": "2.4.0", + "version": "2.5.0", + "description": "data-transfer-hub-portal", "license": "Apache-2.0", "author": { "name": "Amazon Web Services", @@ -8,52 +9,53 @@ }, "private": true, "dependencies": { - "@apollo/client": "^3.7.1", - "@aws-amplify/ui-react": "^1.2.18", + "@apollo/client": "^3.8.3", + "@aws-amplify/ui-components": "^1.9.40", + "@aws-amplify/ui-react": "^5.3.0", "@material-ui/core": "^4.12.4", "@material-ui/icons": "^4.11.3", "@material-ui/lab": "4.0.0-alpha.61", - "@testing-library/jest-dom": "^5.16.5", - "@testing-library/react": "^12.1.5", - "@testing-library/user-event": "^13.2.1", + "@testing-library/jest-dom": "^6.1.3", + "@testing-library/react": "^14.0.0", + "@testing-library/user-event": "^14.4.3", "@types/classnames": "^2.3.1", - "@types/jest": "^29.2.4", - "@types/node": "^16.10.1", - "@types/react": "^17.0.24", - "@types/react-copy-to-clipboard": "^5.0.1", - "@types/react-dom": "^17.0.9", + "@types/jest": "^29.5.4", + "@types/node": "^20.6.0", + "@types/react": "^18.2.21", + "@types/react-copy-to-clipboard": "^5.0.4", + "@types/react-dom": "^18.2.7", "@types/react-loader-spinner": "^4.0.0", - "@types/react-router-dom": "^5.3.0", - "apexcharts": "^3.36.3", + "@types/react-router-dom": "^5.3.3", + "apexcharts": "^3.42.0", "apollo-link": "^1.2.14", - "aws-amplify": "^4.3.4", + "aws-amplify": "^5.3.10", "aws-appsync-auth-link": "^3.0.7", "aws-appsync-subscription-link": "^3.1.2", - "axios": "^0.26.0", + "axios": "^1.5.0", "classnames": "^2.3.2", - "date-fns": "^2.29.3", - "i18next": "^20.2.1", - "i18next-browser-languagedetector": "^6.1.4", - "i18next-http-backend": "^1.4.1", + "date-fns": "^2.30.0", + "i18next": "^23.5.0", + "i18next-browser-languagedetector": "^7.1.0", + "i18next-http-backend": "^2.2.2", "lodash.clonedeep": "^4.5.0", - "moment": "^2.29.1", - "node-sass": "^8.0.0", + "moment": "^2.29.4", + "node-sass": "^9.0.0", "oidc-client": "^1.11.5", - "oidc-client-ts": "^2.2.0", - "react": "^17.0.2", - "react-apexcharts": "^1.4.0", - "react-copy-to-clipboard": "^5.0.4", - "react-dom": "^17.0.2", - "react-i18next": "^11.8.13", - "react-loader-spinner": "^4.0.0", - "react-minimal-datetime-range": "^2.0.7", - "react-number-format": "^4.9.3", - "react-oidc-context": "^2.2.0", - "react-router-dom": "^5.3.0", - "redux": "^4.2.0", + "oidc-client-ts": "^2.2.5", + "react": "^18.2.0", + "react-apexcharts": "^1.4.1", + "react-copy-to-clipboard": "^5.1.0", + "react-dom": "^18.2.0", + "react-i18next": "^13.2.2", + "react-loader-spinner": "^5.4.5", + 
"react-minimal-datetime-range": "^2.0.9", + "react-number-format": "^5.3.1", + "react-oidc-context": "^2.3.0", + "react-router-dom": "^6.15.0", + "redux": "^4.2.1", "redux-react-hook": "^4.0.3", - "sweetalert2": "11.4.8", - "typescript": "^4.9.3" + "sweetalert2": "11.7.27", + "typescript": "^5.2.2" }, "scripts": { "start": "cross-env TSC_WATCHFILE=UseFsEventsWithFallbackDynamicPolling react-scripts start", @@ -79,21 +81,18 @@ }, "devDependencies": { "@types/lodash.clonedeep": "^4.5.7", - "@typescript-eslint/eslint-plugin": "^5.45.0", - "@typescript-eslint/parser": "^5.45.0", - "browserslist": "^4.21.4", + "@typescript-eslint/eslint-plugin": "^6.6.0", + "@typescript-eslint/parser": "^6.6.0", + "browserslist": "^4.21.10", "cross-env": "^7.0.3", - "eslint": "^8.29.0", - "eslint-config-prettier": "^8.2.0", - "eslint-config-standard": "^17.0.0", - "eslint-plugin-import": "^2.26.0", - "eslint-plugin-n": "^15.6.0", - "eslint-plugin-prettier": "^3.4.0", + "eslint": "^8.48.0", + "eslint-config-standard": "^17.1.0", + "eslint-plugin-import": "^2.28.1", + "eslint-plugin-n": "^16.0.2", "eslint-plugin-promise": "^6.1.1", - "eslint-plugin-react": "^7.31.11", - "prettier": "^2.2.1", + "eslint-plugin-react": "^7.33.2", "react-scripts": "^5.0.1", - "rimraf": "^3.0.2" + "rimraf": "^5.0.1" }, "overrides": { "nth-check": "2.1.1" diff --git a/source/portal/public/locales/en/translation.json b/source/portal/public/locales/en/translation.json index 4898687..3ef9b95 100644 --- a/source/portal/public/locales/en/translation.json +++ b/source/portal/public/locales/en/translation.json @@ -7,7 +7,7 @@ "reLoignTips": "This session has expired. To log in to Data Transfer Hub again, please click Login Again.", "info": "Info", "optional": "optional", - "recommened": "Recommened", + "recommened": "Recommend", "credentialsGuide": "Create secret guide & AK/SK format", "signin": { "signInToDRH": "Sign in to Data Transfer Hub", @@ -44,6 +44,7 @@ "threeDesc": "Review and create" }, "home": { + "name": "Home", "title": { "getStarted": "Get started", "getStartedDesc": "Transfer data across partitions, or move from other cloud providers.", @@ -138,7 +139,7 @@ "destInAccount": "Is Bucket in this account?", "destInAccountDesc": "Select Yes, if the bucket is in the same Amazon Web Services account as Data Transfer Hub.", "objectPrefix": "Prefix to Insert", - "prefixDesc": "The data transfer engine will insert this prefix for all objects. It will automatically append ‘/‘.", + "prefixDesc": "The data transfer engine will insert this prefix for all objects. It will automatically append '/'.", "storageClass": "Storage Class", "storageClassDesc": "Objects will be stored with the selected storage class.", "destRegionName": "Region Name", @@ -171,9 +172,9 @@ "finderDepth": "Finder Depth", "finderDepthDesc": "The depth of sub folders to compare in parallel. 0 means comparing all objects in sequence.", "finderNumber": "Finder Number", - "finderNumberDesc": "The number of find threads to run in parrallel.", + "finderNumberDesc": "The number of find threads to run in parallel.", "workerThreadsNumber": "Worker Threads Number", - "workerThreadsNumberDesc": "The number of worker threads to run in one insance.", + "workerThreadsNumberDesc": "The number of worker threads to run in one instance. By default is 4, for those small objects. When the object size more than 1TB, recommend set up to 1.", "schedulingSettings": "Task Scheduling Settings", "schedulingSettingsDesc1": "Set task scheduling by selecting fixed rate manually or using CRON expression. 
Please select 'One time transfer' if it is an one-time task. See", "schedulingSettingsDesc2": "to understand CRON expression.", @@ -197,6 +198,7 @@ "taskDetail": "Specify task details", "sourceType": "Source Type", "selectContainerType": "Select container registry type", + "onlyTag": "Only transfer the 'Tagged' images", "valid": { "srcRegionRequired": "Source Region is Required", "accountRequired": "Please input Amazon Web Services account ID", @@ -219,7 +221,7 @@ "image1": "Enter all images in format of", "image2": "image-name", "image3": "tag", - "image4": "delimited by comma. If tag is ommited, the latest tag will be used.", + "image4": "delimited by comma. If tag is omitted, the latest tag will be used.", "image5": " If tag is ALL_TAGS, all the tags of this image will be used." }, "dest": { @@ -241,7 +243,7 @@ "descriptionDesc": "Description about this task", "email": "Alarm Notification", "optional": "optional", - "emailDesc": "Receive notificaiton when transfer failed." + "emailDesc": "Receive notification when transfer failed." } } }, @@ -265,6 +267,8 @@ "taskList": { "title": "Tasks", "stopTask": "Stop Task", + "activated": "Activated", + "deleted": "Deleted", "tips": { "selectTask": "Please select a task", "selectTaskStop": "Please select a task to stop", @@ -303,7 +307,7 @@ "status": "Status", "inProgress": "Transfer in progress", "totalObjects": "Total Objects", - "repObjects": "Transfered Objects", + "repObjects": "Transferred Objects", "srcEndpoint": "Source Endpoint URL", "srcRegion": "Source Region", "srcName": "Source Bucket Name", @@ -313,7 +317,7 @@ "destName": "Destination Bucket Name", "destPrefix": "Destination Bucket Prefix", "destInThisAccount": "Bucket in This Account?", - "credentials": "Paramete Store for credentials", + "credentials": "Parameter Store for credentials", "images": "Images", "option": "Options", "description": "Description", @@ -375,9 +379,11 @@ "viewDetail": "View Details", "taskAction": "Task Action", "stopTask": "Stop Task", + "deleteTask": "Delete Task", "cloneTask": "Clone Task", "reLogin": "Login Again", - "createTransferTask": "Start a new transfer task" + "createTransferTask": "Start a new transfer task", + "close": "Close" }, "tips": { "error": { @@ -442,7 +448,7 @@ }, "engineSettingsEC2": { "name": "Engine settings", - "tip1": "The S3 transfer engine use Amazon EC2 Graviton2 in an Auto Scaling Group to transfer objects. You can adjust the concurrency by setting the minimun, maximum and desired instance number. The transfer engine will auto scale between the minimun value and maximum value.", + "tip1": "The S3 transfer engine use Amazon EC2 Graviton2 in an Auto Scaling Group to transfer objects. You can adjust the concurrency by setting the minimum, maximum and desired instance number. The transfer engine will auto scale between the minimun value and maximum value.", "tip2": "You can adjust these numbers on Amazon Web Services Console after creating the task.", "linkName": "Adjust the Auto Scaling Group to change the transfer concurrency." }, @@ -462,7 +468,7 @@ "ex1": "Example 1: ", "ex2": "Example 2: ", "tip1": "If there're 10 subdirectories with over 100k files each, it's recommended to set finderDepth=1 and finderNumber=10", - "tip2": "If the transferred files are under two tier subdirectories, with 10 tier 1 subdirectories and 15 tier 2 subdirectories incl. over 100k leaf objects each. 
It is recommended to set finderDepth=2,finderNumber=10*15=150" + "tip2": "If the transferred files are under two tier subdirectories, with 10 tier 1 subdirectories and 15 tier 2 subdirectories incl. over 100k leaf objects each. It is recommended to set finderDepth=2, finderNumber=10*15=150" }, "finderMemory": { "name": "Finder Memory", @@ -493,5 +499,6 @@ "noNewer": "No newer events at this moment. " }, "noData": "No Data", - "loading": "Loading..." + "loading": "Loading...", + "pageNotFound": "Page Not Found" } diff --git a/source/portal/public/locales/zh/translation.json b/source/portal/public/locales/zh/translation.json index 5d98f29..984efa5 100644 --- a/source/portal/public/locales/zh/translation.json +++ b/source/portal/public/locales/zh/translation.json @@ -44,6 +44,7 @@ "threeDesc": "预览并创建" }, "home": { + "name": "首页", "title": { "getStarted": "开始使用", "getStartedDesc": "跨区域数据传输,或从其他云厂商迁移数据。", @@ -173,7 +174,7 @@ "finderNumber": "查找器数量", "finderNumberDesc": "要并行运行的查找线程数.", "workerThreadsNumber": "工作线程数", - "workerThreadsNumberDesc": "一个实例中要运行的工作线程数.", + "workerThreadsNumberDesc": "一个实例中要运行的工作线程数.对于那些小对象,默认值为 4。 当对象大小超过 1TB 时,建议设置为 1。", "schedulingSettings": "任务调度设置", "schedulingSettingsDesc1": "可通过手动调整或CRON表达式来设置任务调度频率,如若单次传输,请选单次传输。详见", "schedulingSettingsDesc2": "了解CRON表达式.", @@ -197,6 +198,7 @@ "taskDetail": "填写任务信息", "sourceType": "源仓库类型", "selectContainerType": "选择容器仓库类型", + "onlyTag": "仅传输被“标记”的镜像", "valid": { "srcRegionRequired": "请选择源仓库区域", "accountRequired": "请输入 Amazon Web Services 账户 ID", @@ -219,7 +221,7 @@ "image1": "输入容器镜像名称", "image2": "image-name", "image3": "tag", - "image4": "使用逗号分隔. 如果不指定 tag, 则默认为 latest.", + "image4": "使用逗号分隔. 如果不指定 tag, 则默认为 latest。", "image5": "如果 tag 是 ALL_TAGS,则将使用该容器镜像的所有 tag。" }, "dest": { @@ -265,6 +267,8 @@ "taskList": { "title": "任务", "stopTask": "停止任务", + "activated": "活动的", + "deleted": "已删除", "tips": { "selectTask": "请选择一个任务", "selectTaskStop": "请选择需要停止的任务", @@ -376,8 +380,10 @@ "taskAction": "操作", "stopTask": "停止任务", "cloneTask": "复制任务", + "deleteTask": "删除任务", "reLogin": "重新登录", - "createTransferTask": "创建新任务" + "createTransferTask": "创建新任务", + "close": "关闭" }, "tips": { "error": { @@ -493,5 +499,6 @@ "noNewer": "暂时没有更新的事件。" }, "noData": "暂无数据", - "loading": "加载中..." 
+ "loading": "加载中...", + "pageNotFound": "没找到页面" } diff --git a/source/portal/src/API.ts b/source/portal/src/API.ts index 2557545..55ac675 100644 --- a/source/portal/src/API.ts +++ b/source/portal/src/API.ts @@ -5,6 +5,7 @@ export type CreateTaskInput = { type: TaskType; description?: string | null; + scheduleType: ScheduleType; parameters?: Array | null; }; @@ -13,6 +14,11 @@ export enum TaskType { ECR = "ECR", } +export enum ScheduleType { + ONE_TIME = "ONE_TIME", + FIXED_RATE = "FIXED_RATE", +} + export type TaskParameterInput = { ParameterKey?: string | null; ParameterValue?: string | null; @@ -66,17 +72,6 @@ export type StackOutputs = { OutputValue?: string | null; }; -export enum ScheduleType { - ONE_TIME = "ONE_TIME", - FIXED_RATE = "FIXED_RATE", -} - -export type ListTasksResponse = { - __typename: "ListTasksResponse"; - items?: Array | null; - nextToken?: string | null; -}; - export type ListTasksResponseV2 = { __typename: "ListTasksResponseV2"; items?: Array | null; @@ -236,51 +231,8 @@ export type StopTaskMutation = { } | null; }; -export type ListTasksQueryVariables = { - limit?: number | null; - nextToken?: string | null; -}; - -export type ListTasksQuery = { - listTasks?: { - __typename: "ListTasksResponse"; - items?: Array<{ - __typename: "Task"; - id: string; - description?: string | null; - type?: TaskType | null; - templateUrl?: string | null; - parameters?: Array<{ - __typename: "TaskParameter"; - ParameterKey?: string | null; - ParameterValue?: string | null; - } | null> | null; - createdAt?: string | null; - stoppedAt?: string | null; - progress?: TaskProgress | null; - progressInfo?: { - __typename: "CommonProgressInfo"; - total?: number | null; - replicated?: number | null; - } | null; - stackId?: string | null; - stackName?: string | null; - stackOutputs?: Array<{ - __typename: "StackOutputs"; - Description?: string | null; - OutputKey?: string | null; - OutputValue?: string | null; - } | null> | null; - stackStatus?: string | null; - stackStatusReason?: string | null; - executionArn?: string | null; - scheduleType?: ScheduleType | null; - } | null> | null; - nextToken?: string | null; - } | null; -}; - export type ListTasksV2QueryVariables = { + progress?: TaskProgress | null; page?: number | null; count?: number | null; }; diff --git a/source/portal/src/App.scss b/source/portal/src/App.scss index ee4df25..a039b8a 100644 --- a/source/portal/src/App.scss +++ b/source/portal/src/App.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .App { text-align: center; } diff --git a/source/portal/src/App.test.tsx b/source/portal/src/App.test.tsx index 352d7b8..2a1b446 100644 --- a/source/portal/src/App.test.tsx +++ b/source/portal/src/App.test.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { render } from "@testing-library/react"; import App from "./App"; @@ -5,5 +7,5 @@ import App from "./App"; test("renders learn react link", () => { const { getByText } = render(); const linkElement = getByText(/learn react/i); - expect(linkElement).toBeInTheDocument(); + expect(linkElement).toBeDefined(); }); diff --git a/source/portal/src/App.tsx b/source/portal/src/App.tsx index 3f720ce..fe1ea42 100644 --- a/source/portal/src/App.tsx +++ b/source/portal/src/App.tsx @@ -1,10 +1,11 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; -import { BrowserRouter, Route } from "react-router-dom"; +import { Link, Route, BrowserRouter as Router, Routes } from "react-router-dom"; import { useDispatch } from "redux-react-hook"; -import { I18n } from "aws-amplify"; -import Amplify, { Hub } from "aws-amplify"; -import { AmplifyAuthenticator, AmplifySignIn } from "@aws-amplify/ui-react"; +import { Amplify, Auth, Hub, I18n } from "aws-amplify"; +import { Authenticator } from "@aws-amplify/ui-react"; import { AuthState, onAuthUIStateChange } from "@aws-amplify/ui-components"; import Axios from "axios"; @@ -22,6 +23,8 @@ import StepThreeS3 from "./pages/creation/s3/StepThreeS3"; import StepTwoECR from "./pages/creation/ecr/StepTwoECR"; import StepThreeECR from "./pages/creation/ecr/StepThreeECR"; import List from "./pages/list/TaskList"; + +import "@aws-amplify/ui-react/styles.css"; import { ACTION_TYPE, AmplifyConfigType, @@ -61,96 +64,104 @@ const Loader = () => { ); }; +const loginComponents = { + Header() { + const { t } = useTranslation(); + return
{t("signin.signInToDRH")}
; + }, +}; + const AmplifyLoginPage: React.FC = () => { const { t } = useTranslation(); return ( -
- - + -
-
-
+ }, + }} + >
); }; const SignedInApp: React.FC = (props: SignedInAppProps) => { const { oidcSignOut } = props; + const { t } = useTranslation(); return ( <> - - - - - - - - - - - - - - - + + + }> + }> + }> + } + > + } + > + } + > + } + > + } + > + }> + } + > + } + > + } + > + } + > + +
+
+

{t("pageNotFound")}

+ + {t("home.name")} + +
+
+ + } + /> +
+
); }; @@ -161,8 +172,8 @@ const OIDCAppRouter: React.FC = () => { const dispatch = useDispatch(); useEffect(() => { - return auth?.events?.addAccessTokenExpiring((event) => { - console.info("addAccessTokenExpiring:event:", event); + // the `return` is important - addAccessTokenExpiring() returns a cleanup function + return auth?.events?.addAccessTokenExpiring(() => { auth.signinSilent(); }); }, [auth.events, auth.signinSilent]); @@ -226,21 +237,48 @@ const AmplifyAppRouter: React.FC = () => { if (payload?.data?.code === "ResourceNotFoundException") { window.localStorage.removeItem(DRH_CONFIG_JSON_NAME); window.location.reload(); + } else { + Auth?.currentAuthenticatedUser() + .then((authData: any) => { + dispatch({ + type: ACTION_TYPE.UPDATE_USER_EMAIL, + email: authData?.attributes?.email, + }); + setAuthState(AuthState.SignedIn); + }) + .catch((error) => { + console.error(error); + }); } }; + Hub.listen("auth", (data) => { const { payload } = data; onAuthEvent(payload); }); + useEffect(() => { + if (authState === undefined) { + Auth?.currentAuthenticatedUser() + .then((authData: any) => { + dispatch({ + type: ACTION_TYPE.UPDATE_USER_EMAIL, + userEmail: authData?.attributes?.email, + }); + setAuthState(AuthState.SignedIn); + }) + .catch((error) => { + console.error(error); + }); + } return onAuthUIStateChange((nextAuthState, authData: any) => { + setAuthState(nextAuthState); dispatch({ type: ACTION_TYPE.UPDATE_USER_EMAIL, userEmail: authData?.attributes?.email, }); - setAuthState(nextAuthState); }); - }, []); + }, [authState]); return authState === AuthState.SignedIn ? ( @@ -316,7 +354,7 @@ const App: React.FC = () => { useEffect(() => { document.title = t("title"); if (window.performance) { - if (performance.navigation.type === 1) { + if ((performance.getEntriesByType("navigation") as any)?.[0]?.type === "reload") { const timeStamp = new Date().getTime(); setLoadingConfig(true); Axios.get(`/aws-exports.json?timestamp=${timeStamp}`).then((res) => { diff --git a/source/portal/src/assets/config/const.ts b/source/portal/src/assets/config/const.ts index 3a7cb9b..4b2fb9c 100644 --- a/source/portal/src/assets/config/const.ts +++ b/source/portal/src/assets/config/const.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import { EnumSourceType } from "../types/index"; export const GITHUB_LINK = @@ -294,6 +296,10 @@ export const ECR_PARAMS_LIST_MAP: any = { en_name: "Destination Image Prefix", zh_name: "目标容器镜像前缀", }, + includeUntagged: { + en_name: "Only transfer the 'Tagged' images", + zh_name: "仅传输被“标记”的镜像", + }, destCredential: { en_name: "Destination Credential Parameter Name", zh_name: "目的仓库凭证参数名称", diff --git a/source/portal/src/assets/config/content.ts b/source/portal/src/assets/config/content.ts index 8be80ff..bcf5c65 100644 --- a/source/portal/src/assets/config/content.ts +++ b/source/portal/src/assets/config/content.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import { WHAT_IS_LINK, COPY_BETWEEN_LINK, diff --git a/source/portal/src/assets/types/index.ts b/source/portal/src/assets/types/index.ts index bc47882..d53abba 100644 --- a/source/portal/src/assets/types/index.ts +++ b/source/portal/src/assets/types/index.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 // Task Type Icons import ICON_S3 from "../images/icon-s3.png"; import ICON_ECR from "../images/icon-ecr.png"; @@ -139,7 +141,6 @@ export enum EnumBucketType { // Task Tyep Enum export enum EnumSourceType { - S3_EC2 = "Amazon_S3", S3 = "Amazon_S3", S3_COMPATIBLE = "Amazon_S3_Compatible", Qiniu = "Qiniu_Kodo", @@ -176,6 +177,7 @@ export const TASK_STATUS_MAP: any = { IN_PROGRESS: { name: "In Progress", src: STATUS_PROGRESS, class: "gray" }, DONE: { name: "Completed", src: STATUS_DONE, class: "success" }, STOPPED: { name: "Stopped", src: STATUS_PENDING, class: "gray" }, + UNKNOWN: { name: "Unknown", src: STATUS_PENDING, class: "gray" }, }; export interface ISouceType { @@ -215,8 +217,8 @@ export const DOCKER_IMAGE_TYPE = [ en_name: "All", zh_name: "全部", value: EnumDockerImageType.ALL, - en_desc: "Transfer all images in the source.", - zh_desc: "传输源地址所有容器镜像", + en_desc: "Transfer all images in the source, both tagged and untagged.", + zh_desc: "传输源中带有标记和未标记的所有容器镜像", }, { id: 2, diff --git a/source/portal/src/assets/utils/request.ts b/source/portal/src/assets/utils/request.ts index 19b8503..efbcb6b 100644 --- a/source/portal/src/assets/utils/request.ts +++ b/source/portal/src/assets/utils/request.ts @@ -1,18 +1,5 @@ -/* -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 /* eslint-disable no-async-promise-executor */ /* eslint-disable @typescript-eslint/explicit-module-boundary-types */ import Swal from "sweetalert2"; diff --git a/source/portal/src/assets/utils/utils.ts b/source/portal/src/assets/utils/utils.ts index df0c05f..89c9eed 100644 --- a/source/portal/src/assets/utils/utils.ts +++ b/source/portal/src/assets/utils/utils.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import { format } from "date-fns"; // format date export const formatLocalTime = (time: any): string => { diff --git a/source/portal/src/assets/utils/xss.ts b/source/portal/src/assets/utils/xss.ts index 9ca3400..6b7ccd8 100644 --- a/source/portal/src/assets/utils/xss.ts +++ b/source/portal/src/assets/utils/xss.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 // No UpdateInstanceGroup, UpdateSubAccountLink function in UI const NEED_ENCODE_PARAM_KEYS: string[] = []; diff --git a/source/portal/src/common/Alert.tsx b/source/portal/src/common/Alert.tsx index c878745..2bd1bb2 100644 --- a/source/portal/src/common/Alert.tsx +++ b/source/portal/src/common/Alert.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
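The `TASK_STATUS_MAP` hunk above adds an `UNKNOWN` entry; `TaskStatusComp` later in this diff pairs it with `progress = progress || "UNKNOWN"` and optional chaining on the map lookup. A minimal sketch of that fallback pattern, with the map trimmed to two entries and the icon field omitted:

```ts
interface StatusMeta {
  name: string;
  class: string;
}

// Trimmed-down map for illustration; the real one also carries an icon per status.
const TASK_STATUS_MAP: Record<string, StatusMeta> = {
  DONE: { name: "Completed", class: "success" },
  UNKNOWN: { name: "Unknown", class: "gray" },
};

// Missing or unrecognized backend states degrade to the neutral badge
// instead of throwing on an undefined map entry.
function statusMeta(progress?: string): StatusMeta {
  return TASK_STATUS_MAP[progress ?? "UNKNOWN"] ?? TASK_STATUS_MAP.UNKNOWN;
}
```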
+// SPDX-License-Identifier: Apache-2.0 import React, { ReactElement } from "react"; import ErrorOutlineIcon from "@material-ui/icons/ErrorOutline"; import ReportProblemOutlinedIcon from "@material-ui/icons/ReportProblemOutlined"; @@ -10,7 +12,7 @@ export enum AlertType { interface AlertProps { type?: AlertType; title?: string; - content: string | JSX.Element; + content: string | ReactElement; actions?: ReactElement; noMargin?: boolean; width?: number; diff --git a/source/portal/src/common/Bottom.scss b/source/portal/src/common/Bottom.scss index 4b5ff00..5079440 100644 --- a/source/portal/src/common/Bottom.scss +++ b/source/portal/src/common/Bottom.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .page-bottom { padding: 0 20px 0 10px; text-align: left; diff --git a/source/portal/src/common/Bottom.tsx b/source/portal/src/common/Bottom.tsx index 9cd918f..17e3fae 100644 --- a/source/portal/src/common/Bottom.tsx +++ b/source/portal/src/common/Bottom.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState } from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/InfoBar.scss b/source/portal/src/common/InfoBar.scss index 4a6b54f..dd36d25 100644 --- a/source/portal/src/common/InfoBar.scss +++ b/source/portal/src/common/InfoBar.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .info-bar { position: fixed; z-index: 9; diff --git a/source/portal/src/common/InfoBar.tsx b/source/portal/src/common/InfoBar.tsx index 2599701..58a44bf 100644 --- a/source/portal/src/common/InfoBar.tsx +++ b/source/portal/src/common/InfoBar.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import classNames from "classnames"; import { useDispatch, useMappedState } from "redux-react-hook"; diff --git a/source/portal/src/common/InfoSpan.tsx b/source/portal/src/common/InfoSpan.tsx index 5364e59..55f14cd 100644 --- a/source/portal/src/common/InfoSpan.tsx +++ b/source/portal/src/common/InfoSpan.tsx @@ -1,14 +1,16 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useDispatch } from "redux-react-hook"; import { useTranslation } from "react-i18next"; import { ACTION_TYPE } from "assets/types"; -interface spanInfo { +interface SpanInfoProps { spanType: string; infoText?: string; } -const InfoSpan: React.FC = (props) => { +const InfoSpan: React.FC = (props) => { const { spanType, infoText } = props; const { t } = useTranslation(); const dispatch = useDispatch(); diff --git a/source/portal/src/common/LeftMenu.scss b/source/portal/src/common/LeftMenu.scss index 547da4c..cfcf051 100644 --- a/source/portal/src/common/LeftMenu.scss +++ b/source/portal/src/common/LeftMenu.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .left { .drh-left-menu { position: absolute; diff --git a/source/portal/src/common/LeftMenu.tsx b/source/portal/src/common/LeftMenu.tsx index 4ba2296..b4ef9b5 100644 --- a/source/portal/src/common/LeftMenu.tsx +++ b/source/portal/src/common/LeftMenu.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { Link } from "react-router-dom"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/Loading.tsx b/source/portal/src/common/Loading.tsx index d2c7144..dbb4f7f 100644 --- a/source/portal/src/common/Loading.tsx +++ b/source/portal/src/common/Loading.tsx @@ -1,11 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; -import Loader from "react-loader-spinner"; -import "react-loader-spinner/dist/loader/css/react-spinner-loader.css"; +import { ThreeDots } from "react-loader-spinner"; const DataLoading: React.FC = () => { return (
- +
); }; diff --git a/source/portal/src/common/TopBar.scss b/source/portal/src/common/TopBar.scss index 955e090..6a2f32f 100644 --- a/source/portal/src/common/TopBar.scss +++ b/source/portal/src/common/TopBar.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .drh-top-bar { width: 100%; height: 40px; diff --git a/source/portal/src/common/TopBar.tsx b/source/portal/src/common/TopBar.tsx index 5a2d19e..ce5c17e 100644 --- a/source/portal/src/common/TopBar.tsx +++ b/source/portal/src/common/TopBar.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; import { useMappedState } from "redux-react-hook"; diff --git a/source/portal/src/common/comp/DescLink.tsx b/source/portal/src/common/comp/DescLink.tsx index 8cbd309..e8222dc 100644 --- a/source/portal/src/common/comp/DescLink.tsx +++ b/source/portal/src/common/comp/DescLink.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import OpenInNewIcon from "@material-ui/icons/OpenInNew"; diff --git a/source/portal/src/common/comp/LoadingText/index.tsx b/source/portal/src/common/comp/LoadingText/index.tsx index 5d20dd6..548450e 100644 --- a/source/portal/src/common/comp/LoadingText/index.tsx +++ b/source/portal/src/common/comp/LoadingText/index.tsx @@ -1,2 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import LoadingText from "./loadingText"; export default LoadingText; diff --git a/source/portal/src/common/comp/LoadingText/loadingText.tsx b/source/portal/src/common/comp/LoadingText/loadingText.tsx index 6187106..e83aee0 100644 --- a/source/portal/src/common/comp/LoadingText/loadingText.tsx +++ b/source/portal/src/common/comp/LoadingText/loadingText.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import CircularProgress from "@material-ui/core/CircularProgress"; diff --git a/source/portal/src/common/comp/Modal.tsx b/source/portal/src/common/comp/Modal.tsx new file mode 100644 index 0000000..e47d70a --- /dev/null +++ b/source/portal/src/common/comp/Modal.tsx @@ -0,0 +1,99 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +import React, { ReactElement } from "react"; +import { withStyles } from "@material-ui/core/styles"; +import Dialog from "@material-ui/core/Dialog"; +import MuiDialogTitle from "@material-ui/core/DialogTitle"; +import MuiDialogContent from "@material-ui/core/DialogContent"; +import MuiDialogActions from "@material-ui/core/DialogActions"; +import IconButton from "@material-ui/core/IconButton"; +import CloseIcon from "@material-ui/icons/Close"; +import Typography from "@material-ui/core/Typography"; + +const styles: any = (theme: any) => ({ + root: { + margin: 0, + background: "#f5f5f5", + fontWeight: "bold", + padding: "14px 20px 13px", + }, + closeButton: { + position: "absolute", + right: theme.spacing(0), + top: theme.spacing(0), + color: theme.palette.grey[500], + }, +}); + +const DialogContent = withStyles(() => ({ + root: { + padding: 0, + }, +}))(MuiDialogContent); + +const DialogActions = withStyles((theme) => ({ + root: { + margin: 0, + padding: theme.spacing(2), + }, +}))(MuiDialogActions); + +const DialogTitle = withStyles(styles)((props: any) => { + const { children, classes, onClose, ...other } = props; + return ( + + + {children} + + {onClose ? ( + + + + ) : null} + + ); +}); + +interface ModalProps { + title: string; + isOpen: boolean; + fullWidth: boolean; + closeModal: () => void; + children: ReactElement; + actions: ReactElement; + width?: number; +} + +const Modal: React.FC = (props: ModalProps) => { + const { title, isOpen, fullWidth, closeModal, children, actions, width } = + props; + const handleClose = (event: any, reason: string) => { + console.info("reason:", reason); + if (reason !== "backdropClick") { + closeModal(); + } + }; + return ( + + + {title} + + + {children} + + {actions} + + ); +}; + +export default Modal; diff --git a/source/portal/src/common/comp/NormalButton.ts b/source/portal/src/common/comp/NormalButton.ts index 9a98766..7065dad 100644 --- a/source/portal/src/common/comp/NormalButton.ts +++ b/source/portal/src/common/comp/NormalButton.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import Button from "@material-ui/core/Button"; import { withStyles } from "@material-ui/core/styles"; diff --git a/source/portal/src/common/comp/PrimaryButton.ts b/source/portal/src/common/comp/PrimaryButton.ts index 8bd9bba..3e0048f 100644 --- a/source/portal/src/common/comp/PrimaryButton.ts +++ b/source/portal/src/common/comp/PrimaryButton.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import Button from "@material-ui/core/Button"; import { withStyles } from "@material-ui/core/styles"; @@ -8,6 +10,7 @@ const PrimaryButton = withStyles({ textTransform: "none", fontSize: 14, fontWeight: "bold", + height: 36, padding: "5px 15px", border: "1px solid", backgroundColor: "#EC7211", diff --git a/source/portal/src/common/comp/PrimaryButtonLoading.ts b/source/portal/src/common/comp/PrimaryButtonLoading.ts index eb12452..8c2045a 100644 --- a/source/portal/src/common/comp/PrimaryButtonLoading.ts +++ b/source/portal/src/common/comp/PrimaryButtonLoading.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
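The new `Modal` component above deliberately ignores the `backdropClick` close reason, so a dialog can only be dismissed via its close icon or explicit action buttons. A usage sketch against its `ModalProps` interface; the dialog copy and state handling are illustrative:

```tsx
import React, { useState } from "react";
import Modal from "common/comp/Modal";

const StopTaskDialog: React.FC = () => {
  const [open, setOpen] = useState(true);
  return (
    <Modal
      title="Stop Task" // illustrative copy
      isOpen={open}
      fullWidth={false}
      closeModal={() => setOpen(false)} // not fired by backdrop clicks
      actions={<button onClick={() => setOpen(false)}>Close</button>}
    >
      <p>The task will stop transferring new objects.</p>
    </Modal>
  );
};

export default StopTaskDialog;
```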
+// SPDX-License-Identifier: Apache-2.0 import Button from "@material-ui/core/Button"; import { withStyles } from "@material-ui/core/styles"; @@ -8,6 +10,7 @@ const PrimaryButtonLoading = withStyles({ boxShadow: "none", textTransform: "none", fontSize: 14, + height: 36, fontWeight: "bold", padding: "5px 15px", border: "1px solid", diff --git a/source/portal/src/common/comp/ProgressBar.tsx b/source/portal/src/common/comp/ProgressBar.tsx index 2ba137f..917ca42 100644 --- a/source/portal/src/common/comp/ProgressBar.tsx +++ b/source/portal/src/common/comp/ProgressBar.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import * as React from "react"; import PropTypes from "prop-types"; import { makeStyles } from "@material-ui/core/styles"; diff --git a/source/portal/src/common/comp/SelectInput.ts b/source/portal/src/common/comp/SelectInput.ts index 99a504f..5c35521 100644 --- a/source/portal/src/common/comp/SelectInput.ts +++ b/source/portal/src/common/comp/SelectInput.ts @@ -1,3 +1,6 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + import { createStyles, withStyles, Theme } from "@material-ui/core/styles"; import InputBase from "@material-ui/core/InputBase"; diff --git a/source/portal/src/common/comp/SignOut.tsx b/source/portal/src/common/comp/SignOut.tsx index ea6d8bc..6265880 100644 --- a/source/portal/src/common/comp/SignOut.tsx +++ b/source/portal/src/common/comp/SignOut.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { Auth } from "aws-amplify"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/comp/TaskStatusComp.tsx b/source/portal/src/common/comp/TaskStatusComp.tsx index 2fff410..520f1a9 100644 --- a/source/portal/src/common/comp/TaskStatusComp.tsx +++ b/source/portal/src/common/comp/TaskStatusComp.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect, useRef } from "react"; import { EnumTaskStatus, TASK_STATUS_MAP } from "assets/types/index"; @@ -29,6 +31,7 @@ const STATUS_ICON_MAP: any = { IN_PROGRESS: , DONE: , STOPPED: , + UNKNOWN: , }; const TaskStatusComp: React.FC = ({ @@ -42,7 +45,7 @@ const TaskStatusComp: React.FC = ({ const [errorMessage, setErrorMessage] = useState(); const [showError, setShowError] = useState(false); const [curRegion, setCurRegion] = useState(""); - + progress = progress || "UNKNOWN"; function useOutsideAlerter(ref: any) { useEffect(() => { /** @@ -97,13 +100,13 @@ const TaskStatusComp: React.FC = ({ } }} className={ - progress ? TASK_STATUS_MAP[progress].class + " status" : "status" + progress ? TASK_STATUS_MAP[progress]?.class + " status" : "status" } > {progress ? STATUS_ICON_MAP[progress] : ""} - {progress ? TASK_STATUS_MAP[progress].name : ""} + {progress ? TASK_STATUS_MAP[progress]?.name : ""} {progress === EnumTaskStatus.ERROR && showError && (
diff --git a/source/portal/src/common/comp/TextButton.ts b/source/portal/src/common/comp/TextButton.ts index 214fd0a..35be735 100644 --- a/source/portal/src/common/comp/TextButton.ts +++ b/source/portal/src/common/comp/TextButton.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import Button from "@material-ui/core/Button"; import { withStyles } from "@material-ui/core/styles"; diff --git a/source/portal/src/common/comp/TimeRange.tsx b/source/portal/src/common/comp/TimeRange.tsx new file mode 100644 index 0000000..d31e97b --- /dev/null +++ b/source/portal/src/common/comp/TimeRange.tsx @@ -0,0 +1,187 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +import moment from "moment"; +import React, { useState, useEffect } from "react"; +import { useTranslation } from "react-i18next"; +import { RangePicker } from "react-minimal-datetime-range"; +import "react-minimal-datetime-range/lib/react-minimal-datetime-range.min.css"; + +const SPECIFY_TIME_ITEMS = [ + { + name: "1h", + }, + { + name: "3h", + }, + { + name: "12h", + }, + { + name: "1d", + }, + { + name: "3d", + }, + { + name: "1w", + }, + { + name: "Custom", + }, +]; + +const buildPreTime = (nowTime: number, period: string) => { + switch (period) { + case "1h": + return Math.floor((nowTime - 1000 * 60 * 60) / 1000); + case "3h": + return Math.floor((nowTime - 1000 * 60 * 60 * 3) / 1000); + case "12h": + return Math.floor((nowTime - 1000 * 60 * 60 * 12) / 1000); + case "1d": + return Math.floor((nowTime - 1000 * 60 * 60 * 24) / 1000); + case "3d": + return Math.floor((nowTime - 1000 * 60 * 60 * 24 * 3) / 1000); + case "1w": + return Math.floor((nowTime - 1000 * 60 * 60 * 24 * 7) / 1000); + default: + return Math.floor(nowTime / 1000); + } +}; + +export const changeSpecifyTimeRange = (range: string) => { + const tmpEndTime = Date.now(); + const tmpStartTime = buildPreTime(tmpEndTime, range); + return [tmpStartTime, Math.floor(tmpEndTime / 1000)]; +}; + +interface TimeRangeProps { + curTimeRangeType: string; + startTime: number; + endTime: number; + changeTimeRange: (timeRange: number[]) => void; + changeRangeType: (rangeType: string) => void; +} + +const getDateTimeByTimeStamp = ( + rangeType: string, + timeStamp: number, + type: "time" | "date" +) => { + const tranformDate = moment(timeStamp * 1000).format("YYYY-MM-DD HH:mm"); + if (rangeType === "Custom") { + if (type === "date") { + return tranformDate.split(" ")?.[0]; + } else { + return tranformDate.split(" ")?.[1]; + } + } +}; + +const TimeRange: React.FC = (props: TimeRangeProps) => { + const { + curTimeRangeType, + startTime, + endTime, + changeRangeType, + changeTimeRange, + } = props; + const { t } = useTranslation(); + const [curSpecifyRange, setCurSpecifyRange] = useState( + curTimeRangeType ?? 
"3h" + ); + const [customDateRage, setCustomDateRage] = useState([ + getDateTimeByTimeStamp(curTimeRangeType, startTime, "date"), + getDateTimeByTimeStamp(curTimeRangeType, endTime, "date"), + ]); + const [customTimeRage, setCustomTimeRage] = useState([ + getDateTimeByTimeStamp(curTimeRangeType, startTime, "time"), + getDateTimeByTimeStamp(curTimeRangeType, endTime, "time"), + ]); + + useEffect(() => { + if (curSpecifyRange && curSpecifyRange !== "Custom") { + setCustomDateRage([]); + setCustomTimeRage(["00:00", "23:59"]); + const timeRange = changeSpecifyTimeRange(curSpecifyRange); + changeTimeRange(timeRange); + changeRangeType(curSpecifyRange); + } + }, [curSpecifyRange]); + + return ( +
+ {SPECIFY_TIME_ITEMS.map((element) => { + return ( + { + setCurSpecifyRange(element.name); + }} + > + {element.name} + + ); + })} + {curSpecifyRange === "Custom" && ( + +
+ { + console.log(res); + // setCurSpecifyRange("Custom"); + const timestamps = res.map((dateString: string) => + Math.floor(moment(dateString).valueOf() / 1000) + ); + changeTimeRange(timestamps); + changeRangeType("Custom"); + const customStartDate = res[0]?.split(" ")?.[0]; + const customEndDate = res[1]?.split(" ")?.[0]; + setCustomDateRage([customStartDate, customEndDate]); + const customStartTime = res[0]?.split(" ")?.[1]; + const customEndTime = res[1]?.split(" ")?.[1]; + setCustomTimeRage([customStartTime, customEndTime]); + }} + onClose={() => { + console.log("onClose"); + }} + onClear={() => { + console.log("onClear"); + }} + style={{ width: "300px", margin: "0 auto" }} + placeholder={[t("taskDetail.startTime"), t("taskDetail.endTime")]} + showOnlyTime={false} // default is false, only select time + //////////////////// + // IMPORTANT DESC // + //////////////////// + defaultDates={customDateRage} + // ['YYYY-MM-DD', 'YYYY-MM-DD'] + // This is the value you choosed every time. + defaultTimes={customTimeRage} + // ['hh:mm', 'hh:mm'] + // This is the value you choosed every time. + initialDates={customDateRage} + // ['YYYY-MM-DD', 'YYYY-MM-DD'] + // This is the initial dates. + // If provied, input will be reset to this value when the clear icon hits, + // otherwise input will be display placeholder + initialTimes={customTimeRage} + // ['hh:mm', 'hh:mm'] + // This is the initial times. + // If provied, input will be reset to this value when the clear icon hits, + // otherwise input will be display placeholder + /> +
+
+ )} +
+ ); +}; + +export default TimeRange; diff --git a/source/portal/src/common/comp/form/DrhCredential.tsx b/source/portal/src/common/comp/form/DrhCredential.tsx index eb5ec16..05bc761 100644 --- a/source/portal/src/common/comp/form/DrhCredential.tsx +++ b/source/portal/src/common/comp/form/DrhCredential.tsx @@ -1,10 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; import { useTranslation } from "react-i18next"; import Select from "@material-ui/core/Select"; import MenuItem from "@material-ui/core/MenuItem"; import RefreshIcon from "@material-ui/icons/Refresh"; -import Loader from "react-loader-spinner"; +import { ThreeDots } from "react-loader-spinner"; import SelectInput from "common/comp/SelectInput"; import InfoSpan from "common/InfoSpan"; @@ -41,14 +43,7 @@ const DrhCredential: React.FC = (props: SelectMenuProp) => { setLoadingData(true); const apiData: any = await appSyncRequestQuery(listSecrets, {}); setLoadingData(false); - if ( - apiData && - apiData.data && - apiData.data.listSecrets && - apiData.data.listSecrets.length > 0 - ) { - setSSMParamList(apiData.data.listSecrets); - } + setSSMParamList(apiData?.data?.listSecrets ?? []); } catch (error) { setLoadingData(false); } @@ -120,7 +115,7 @@ const DrhCredential: React.FC = (props: SelectMenuProp) => { style={{ width: 50, height: 32 }} disabled={true} > - + ) : ( ({ + placeholder: { + color: "#aaa", + }, +})); + +const Placeholder = ({ children }: any) => { + const classes = usePlaceholderStyles(); + return
{children}
; +}; + +const BootstrapInput = withStyles((theme) => ({ + root: { + "label + &": { + marginTop: theme.spacing(3), + }, + }, + input: { + borderRadius: 0, + position: "relative", + backgroundColor: theme.palette.background.paper, + border: "1px solid #545b64", + fontSize: "14px", + fontWeight: "bold", + padding: "8px 10px 8px 10px", + "&:focus": { + borderRadius: 0, + borderColor: "#aab7b8", + }, + }, +}))(InputBase); + +export type SelectItem = { + name: string; + value: string; + disabled?: boolean | false; +}; + +interface SelectProps { + optionList: SelectItem[]; + placeholder?: string | null; + className?: string; + loading?: boolean; + value: string; + onChange?: (event: any) => void; + hasRefresh?: boolean; + disabled?: boolean; + isI18N?: boolean; + allowEmpty?: boolean; + hasStatus?: boolean; + width?: number; + onBlur?: (event: any) => void; +} + +const DTHSelect: React.FC = (props: SelectProps) => { + const { + optionList, + placeholder, + loading, + className, + value, + onChange, + disabled, + isI18N, + allowEmpty, + width, + onBlur, + } = props; + const { t } = useTranslation(); + return ( +
+
+ +
+
+ ); +}; + +export default DTHSelect; diff --git a/source/portal/src/common/context/ClientContext.ts b/source/portal/src/common/context/ClientContext.ts index be1f0c1..a2860e9 100644 --- a/source/portal/src/common/context/ClientContext.ts +++ b/source/portal/src/common/context/ClientContext.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; const ClientContext = React.createContext(""); export default ClientContext; diff --git a/source/portal/src/common/info/ComparisonInfo.tsx b/source/portal/src/common/info/ComparisonInfo.tsx index 64cd27e..8b6aae2 100644 --- a/source/portal/src/common/info/ComparisonInfo.tsx +++ b/source/portal/src/common/info/ComparisonInfo.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/CredentialInfo.tsx b/source/portal/src/common/info/CredentialInfo.tsx index e2fe911..c8c0864 100644 --- a/source/portal/src/common/info/CredentialInfo.tsx +++ b/source/portal/src/common/info/CredentialInfo.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useEffect, useState } from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/EngineEdition.tsx b/source/portal/src/common/info/EngineEdition.tsx index 7f9d72e..5fe7767 100644 --- a/source/portal/src/common/info/EngineEdition.tsx +++ b/source/portal/src/common/info/EngineEdition.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; import OpenInNewIcon from "@material-ui/icons/OpenInNew"; diff --git a/source/portal/src/common/info/EngineSettings.tsx b/source/portal/src/common/info/EngineSettings.tsx index d20d2df..0147e9d 100644 --- a/source/portal/src/common/info/EngineSettings.tsx +++ b/source/portal/src/common/info/EngineSettings.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/EngineSettingsEC2.tsx b/source/portal/src/common/info/EngineSettingsEC2.tsx index 2819355..49fdfdf 100644 --- a/source/portal/src/common/info/EngineSettingsEC2.tsx +++ b/source/portal/src/common/info/EngineSettingsEC2.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; import OpenInNewIcon from "@material-ui/icons/OpenInNew"; diff --git a/source/portal/src/common/info/FinderDepthNumber.tsx b/source/portal/src/common/info/FinderDepthNumber.tsx index eff9010..b7af4ad 100644 --- a/source/portal/src/common/info/FinderDepthNumber.tsx +++ b/source/portal/src/common/info/FinderDepthNumber.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/FinderMemory.tsx b/source/portal/src/common/info/FinderMemory.tsx index 4938ed2..c5f1a40 100644 --- a/source/portal/src/common/info/FinderMemory.tsx +++ b/source/portal/src/common/info/FinderMemory.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/S3BucketDestPrefix.tsx b/source/portal/src/common/info/S3BucketDestPrefix.tsx index 44dd0f5..b4cd96a 100644 --- a/source/portal/src/common/info/S3BucketDestPrefix.tsx +++ b/source/portal/src/common/info/S3BucketDestPrefix.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/S3BucketSrcPrefix.tsx b/source/portal/src/common/info/S3BucketSrcPrefix.tsx index 48e63b1..439e5ee 100644 --- a/source/portal/src/common/info/S3BucketSrcPrefix.tsx +++ b/source/portal/src/common/info/S3BucketSrcPrefix.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; import OpenInNewIcon from "@material-ui/icons/OpenInNew"; diff --git a/source/portal/src/common/info/S3BucketSrcPrefixFilstLIst.tsx b/source/portal/src/common/info/S3BucketSrcPrefixFilstLIst.tsx index 6ab85d0..1c9d446 100644 --- a/source/portal/src/common/info/S3BucketSrcPrefixFilstLIst.tsx +++ b/source/portal/src/common/info/S3BucketSrcPrefixFilstLIst.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/common/info/info.scss b/source/portal/src/common/info/info.scss index d877d51..f84484d 100644 --- a/source/portal/src/common/info/info.scss +++ b/source/portal/src/common/info/info.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .credential { .top-tips { padding: 10px 0 15px; diff --git a/source/portal/src/graphql/mutations.ts b/source/portal/src/graphql/mutations.ts index ecafc71..b52f180 100644 --- a/source/portal/src/graphql/mutations.ts +++ b/source/portal/src/graphql/mutations.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 /* tslint:disable */ /* eslint-disable */ // this is an auto generated file. 
This will be overwritten @@ -12,6 +14,7 @@ export const createTask = /* GraphQL */ ` parameters { ParameterKey ParameterValue + __typename } createdAt stoppedAt @@ -19,6 +22,7 @@ export const createTask = /* GraphQL */ ` progressInfo { total replicated + __typename } stackId stackName @@ -26,11 +30,13 @@ export const createTask = /* GraphQL */ ` Description OutputKey OutputValue + __typename } stackStatus stackStatusReason executionArn scheduleType + __typename } } `; @@ -44,6 +50,7 @@ export const stopTask = /* GraphQL */ ` parameters { ParameterKey ParameterValue + __typename } createdAt stoppedAt @@ -51,6 +58,7 @@ export const stopTask = /* GraphQL */ ` progressInfo { total replicated + __typename } stackId stackName @@ -58,11 +66,13 @@ export const stopTask = /* GraphQL */ ` Description OutputKey OutputValue + __typename } stackStatus stackStatusReason executionArn scheduleType + __typename } } `; diff --git a/source/portal/src/graphql/queries.ts b/source/portal/src/graphql/queries.ts index f6031fb..d098e36 100644 --- a/source/portal/src/graphql/queries.ts +++ b/source/portal/src/graphql/queries.ts @@ -1,45 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 /* tslint:disable */ /* eslint-disable */ // this is an auto generated file. This will be overwritten -export const listTasks = /* GraphQL */ ` - query ListTasks($limit: Int, $nextToken: String) { - listTasks(limit: $limit, nextToken: $nextToken) { - items { - id - description - type - templateUrl - parameters { - ParameterKey - ParameterValue - } - createdAt - stoppedAt - progress - progressInfo { - total - replicated - } - stackId - stackName - stackOutputs { - Description - OutputKey - OutputValue - } - stackStatus - stackStatusReason - executionArn - scheduleType - } - nextToken - } - } -`; export const listTasksV2 = /* GraphQL */ ` - query ListTasksV2($page: Int, $count: Int) { - listTasksV2(page: $page, count: $count) { + query ListTasksV2($progress: TaskProgress, $page: Int, $count: Int) { + listTasksV2(progress: $progress, page: $page, count: $count) { items { id description @@ -48,6 +15,7 @@ export const listTasksV2 = /* GraphQL */ ` parameters { ParameterKey ParameterValue + __typename } createdAt stoppedAt @@ -55,6 +23,7 @@ export const listTasksV2 = /* GraphQL */ ` progressInfo { total replicated + __typename } stackId stackName @@ -62,13 +31,16 @@ export const listTasksV2 = /* GraphQL */ ` Description OutputKey OutputValue + __typename } stackStatus stackStatusReason executionArn scheduleType + __typename } total + __typename } } `; @@ -82,6 +54,7 @@ export const getTask = /* GraphQL */ ` parameters { ParameterKey ParameterValue + __typename } createdAt stoppedAt @@ -89,6 +62,7 @@ export const getTask = /* GraphQL */ ` progressInfo { total replicated + __typename } stackId stackName @@ -96,11 +70,13 @@ export const getTask = /* GraphQL */ ` Description OutputKey OutputValue + __typename } stackStatus stackStatusReason executionArn scheduleType + __typename } } `; @@ -109,6 +85,7 @@ export const listSecrets = /* GraphQL */ ` listSecrets { name description + __typename } } `; @@ -134,8 +111,10 @@ export const listLogStreams = /* GraphQL */ ` uploadSequenceToken arn storedBytes + __typename } total + __typename } } `; @@ -156,9 +135,11 @@ export const getLogEvents = /* GraphQL */ ` timestamp message ingestionTime + __typename } nextForwardToken nextBackwardToken + __typename } } `; @@ -180,10 +161,13 @@ export const getMetricHistoryData = /* GraphQL */ ` 
series { name data + __typename } xaxis { categories + __typename } + __typename } } `; @@ -192,6 +176,7 @@ export const getErrorMessage = /* GraphQL */ ` getErrorMessage(id: $id) { errMessage errCode + __typename } } `; diff --git a/source/portal/src/graphql/schema.json b/source/portal/src/graphql/schema.json index e170ecf..3a3e0c5 100644 --- a/source/portal/src/graphql/schema.json +++ b/source/portal/src/graphql/schema.json @@ -13,38 +13,18 @@ "name" : "Query", "description" : null, "fields" : [ { - "name" : "listTasks", + "name" : "listTasksV2", "description" : null, "args" : [ { - "name" : "limit", + "name" : "progress", "description" : null, "type" : { - "kind" : "SCALAR", - "name" : "Int", + "kind" : "ENUM", + "name" : "TaskProgress", "ofType" : null }, "defaultValue" : null }, { - "name" : "nextToken", - "description" : null, - "type" : { - "kind" : "SCALAR", - "name" : "String", - "ofType" : null - }, - "defaultValue" : null - } ], - "type" : { - "kind" : "OBJECT", - "name" : "ListTasksResponse", - "ofType" : null - }, - "isDeprecated" : false, - "deprecationReason" : null - }, { - "name" : "listTasksV2", - "description" : null, - "args" : [ { "name" : "page", "description" : null, "type" : { @@ -298,7 +278,7 @@ "possibleTypes" : null }, { "kind" : "OBJECT", - "name" : "ListTasksResponse", + "name" : "ListTasksResponseV2", "description" : null, "fields" : [ { "name" : "items", @@ -316,12 +296,12 @@ "isDeprecated" : false, "deprecationReason" : null }, { - "name" : "nextToken", + "name" : "total", "description" : null, "args" : [ ], "type" : { "kind" : "SCALAR", - "name" : "String", + "name" : "Int", "ofType" : null }, "isDeprecated" : false, @@ -745,41 +725,6 @@ "deprecationReason" : null } ], "possibleTypes" : null - }, { - "kind" : "OBJECT", - "name" : "ListTasksResponseV2", - "description" : null, - "fields" : [ { - "name" : "items", - "description" : null, - "args" : [ ], - "type" : { - "kind" : "LIST", - "name" : null, - "ofType" : { - "kind" : "OBJECT", - "name" : "Task", - "ofType" : null - } - }, - "isDeprecated" : false, - "deprecationReason" : null - }, { - "name" : "total", - "description" : null, - "args" : [ ], - "type" : { - "kind" : "SCALAR", - "name" : "Int", - "ofType" : null - }, - "isDeprecated" : false, - "deprecationReason" : null - } ], - "inputFields" : null, - "interfaces" : [ ], - "enumValues" : null, - "possibleTypes" : null }, { "kind" : "OBJECT", "name" : "Secret", @@ -1307,6 +1252,19 @@ "ofType" : null }, "defaultValue" : null + }, { + "name" : "scheduleType", + "description" : null, + "type" : { + "kind" : "NON_NULL", + "name" : null, + "ofType" : { + "kind" : "ENUM", + "name" : "ScheduleType", + "ofType" : null + } + }, + "defaultValue" : null }, { "name" : "parameters", "description" : null, @@ -2144,9 +2102,17 @@ "onFragment" : false, "onField" : true }, { - "name" : "aws_auth", - "description" : "Directs the schema to enforce authorization on a field", - "locations" : [ "FIELD_DEFINITION" ], + "name" : "aws_api_key", + "description" : "Tells the service this field/object has access authorized by an API key.", + "locations" : [ "OBJECT", "FIELD_DEFINITION" ], + "args" : [ ], + "onOperation" : false, + "onFragment" : false, + "onField" : false + }, { + "name" : "aws_cognito_user_pools", + "description" : "Tells the service this field/object has access authorized by a Cognito User Pools token.", + "locations" : [ "OBJECT", "FIELD_DEFINITION" ], "args" : [ { "name" : "cognito_groups", "description" : "List of cognito user pool groups which have access 
on this field", @@ -2165,28 +2131,37 @@ "onFragment" : false, "onField" : false }, { - "name" : "aws_iam", - "description" : "Tells the service this field/object has access authorized by sigv4 signing.", + "name" : "aws_lambda", + "description" : "Tells the service this field/object has access authorized by a Lambda Authorizer.", "locations" : [ "OBJECT", "FIELD_DEFINITION" ], "args" : [ ], "onOperation" : false, "onFragment" : false, "onField" : false }, { - "name" : "aws_oidc", - "description" : "Tells the service this field/object has access authorized by an OIDC token.", - "locations" : [ "OBJECT", "FIELD_DEFINITION" ], - "args" : [ ], + "name" : "deprecated", + "description" : null, + "locations" : [ "FIELD_DEFINITION", "ENUM_VALUE" ], + "args" : [ { + "name" : "reason", + "description" : null, + "type" : { + "kind" : "SCALAR", + "name" : "String", + "ofType" : null + }, + "defaultValue" : "\"No longer supported\"" + } ], "onOperation" : false, "onFragment" : false, "onField" : false }, { - "name" : "aws_subscribe", - "description" : "Tells the service which mutation triggers this subscription.", + "name" : "aws_auth", + "description" : "Directs the schema to enforce authorization on a field", "locations" : [ "FIELD_DEFINITION" ], "args" : [ { - "name" : "mutations", - "description" : "List of mutations which will trigger this subscription when they are called.", + "name" : "cognito_groups", + "description" : "List of cognito user pool groups which have access on this field", "type" : { "kind" : "LIST", "name" : null, @@ -2202,28 +2177,12 @@ "onFragment" : false, "onField" : false }, { - "name" : "aws_api_key", - "description" : "Tells the service this field/object has access authorized by an API key.", - "locations" : [ "OBJECT", "FIELD_DEFINITION" ], - "args" : [ ], - "onOperation" : false, - "onFragment" : false, - "onField" : false - }, { - "name" : "aws_lambda", - "description" : "Tells the service this field/object has access authorized by a Lambda Authorizer.", - "locations" : [ "OBJECT", "FIELD_DEFINITION" ], - "args" : [ ], - "onOperation" : false, - "onFragment" : false, - "onField" : false - }, { - "name" : "aws_cognito_user_pools", - "description" : "Tells the service this field/object has access authorized by a Cognito User Pools token.", - "locations" : [ "OBJECT", "FIELD_DEFINITION" ], + "name" : "aws_publish", + "description" : "Tells the service which subscriptions will be published to when this mutation is called. This directive is deprecated use @aws_susbscribe directive instead.", + "locations" : [ "FIELD_DEFINITION" ], "args" : [ { - "name" : "cognito_groups", - "description" : "List of cognito user pool groups which have access on this field", + "name" : "subscriptions", + "description" : "List of subscriptions which will be published to when this mutation is called.", "type" : { "kind" : "LIST", "name" : null, @@ -2239,29 +2198,12 @@ "onFragment" : false, "onField" : false }, { - "name" : "deprecated", - "description" : null, - "locations" : [ "FIELD_DEFINITION", "ENUM_VALUE" ], - "args" : [ { - "name" : "reason", - "description" : null, - "type" : { - "kind" : "SCALAR", - "name" : "String", - "ofType" : null - }, - "defaultValue" : "\"No longer supported\"" - } ], - "onOperation" : false, - "onFragment" : false, - "onField" : false - }, { - "name" : "aws_publish", - "description" : "Tells the service which subscriptions will be published to when this mutation is called. 
This directive is deprecated use @aws_susbscribe directive instead.", + "name" : "aws_subscribe", + "description" : "Tells the service which mutation triggers this subscription.", "locations" : [ "FIELD_DEFINITION" ], "args" : [ { - "name" : "subscriptions", - "description" : "List of subscriptions which will be published to when this mutation is called.", + "name" : "mutations", + "description" : "List of mutations which will trigger this subscription when they are called.", "type" : { "kind" : "LIST", "name" : null, @@ -2276,6 +2218,22 @@ "onOperation" : false, "onFragment" : false, "onField" : false + }, { + "name" : "aws_iam", + "description" : "Tells the service this field/object has access authorized by sigv4 signing.", + "locations" : [ "OBJECT", "FIELD_DEFINITION" ], + "args" : [ ], + "onOperation" : false, + "onFragment" : false, + "onField" : false + }, { + "name" : "aws_oidc", + "description" : "Tells the service this field/object has access authorized by an OIDC token.", + "locations" : [ "OBJECT", "FIELD_DEFINITION" ], + "args" : [ ], + "onOperation" : false, + "onFragment" : false, + "onField" : false } ] } } diff --git a/source/portal/src/graphql/subscriptions.ts b/source/portal/src/graphql/subscriptions.ts index cb5bb7d..568b991 100644 --- a/source/portal/src/graphql/subscriptions.ts +++ b/source/portal/src/graphql/subscriptions.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 /* tslint:disable */ /* eslint-disable */ // this is an auto generated file. This will be overwritten diff --git a/source/portal/src/i18n.ts b/source/portal/src/i18n.ts index 9465a19..13534d8 100644 --- a/source/portal/src/i18n.ts +++ b/source/portal/src/i18n.ts @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import i18n from "i18next"; import Backend from "i18next-http-backend"; import LanguageDetector from "i18next-browser-languagedetector"; diff --git a/source/portal/src/index.scss b/source/portal/src/index.scss index 0362776..3b7c4e8 100644 --- a/source/portal/src/index.scss +++ b/source/portal/src/index.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
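The graphql/queries.ts and schema.json hunks above retire the cursor-based `listTasks` query in favor of `listTasksV2`, which takes an optional `TaskProgress` filter plus `page`/`count`, and add `__typename` to every selection set (an AppSync codegen refresh). A sketch of a caller, assuming the `appSyncRequestQuery` helper imported elsewhere in this PR; the `"IN_PROGRESS"` filter value is an assumed enum member and the response typing is abbreviated.

```ts
// Sketch of a caller for the new page-based query. appSyncRequestQuery and
// listTasksV2 come from files in this PR; the "IN_PROGRESS" filter value is
// an assumed TaskProgress member.
import { appSyncRequestQuery } from "assets/utils/request";
import { listTasksV2 } from "graphql/queries";

interface TaskPage {
  items: { id: string; stackStatus: string | null }[];
  total: number;
}

async function fetchTaskPage(page: number, count: number): Promise<TaskPage> {
  const resp: any = await appSyncRequestQuery(listTasksV2, {
    progress: "IN_PROGRESS", // optional filter introduced by this change
    page, // page index replaces the old nextToken cursor
    count, // page size replaces the old limit argument
  });
  return resp?.data?.listTasksV2 as TaskPage;
}
```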
+// SPDX-License-Identifier: Apache-2.0 // @font-face {font-family: "Amazon Ember"; src: url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.eot"); src: url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.eot?#iefix") format("embedded-opentype"), url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.woff2") format("woff2"), url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.woff") format("woff"), url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.ttf") format("truetype"), url("//db.onlinewebfonts.com/t/157c6cc36dd65b1b2adc9e7f3329c761.svg#Amazon Ember") format("svg"); } // $font-family: "Amazon Ember", -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', // 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', @@ -37,6 +39,12 @@ a:hover { } } +.flex-center { + display: flex; + align-items: center; + justify-content: space-between; +} + .text-center { text-align: center; } @@ -46,6 +54,14 @@ a:hover { color: #888; } +.mt-10 { + margin-top: 10px; +} + +.mt-20 { + margin-top: 20px; +} + .desc-link { padding-right: 25px; .icon { @@ -152,7 +168,10 @@ code { } .loading-style { - text-align: center; + display: flex; + align-items: center; + align-content: center; + justify-content: center; margin: 40px auto; } @@ -358,10 +377,22 @@ code { } } +.modal-time-range { + display: flex; + flex: 1; + width: 100%; + justify-content: space-between; + position: relative; + z-index: 999; +} + .monitor-filters { display: flex; align-items: center; justify-content: center; +} +.modal-time-range, +.monitor-filters { .metrics-time-filter { display: flex; align-items: center; @@ -438,13 +469,21 @@ code { .monitor-chart-list { margin-top: 37px; - background-color: #fff; display: flex; flex-wrap: wrap; .monitor-chart { + background-color: #fff; width: 50%; - margin-top: 10px; + margin-top: 3px; padding: 10px 10px 20px; + box-shadow: 0 2px 2px #ccc; + .zoom { + position: absolute; + cursor: pointer; + right: 5px; + top: 5px; + z-index: 10; + } } } @@ -546,3 +585,31 @@ $loading-color: #555; justify-content: space-between; } } + +.dth-login-title { + color: #444; + font-size: 24px; + padding: 0 0 5px 0; + text-align: center; +} + +.dth-login { + width: 100vw; + height: 100vh; + display: flex; + flex-direction: column; + align-content: center; + align-items: center; + justify-content: center; +} + +.app-loading { + display: flex; + flex-direction: column; + align-items: center; + align-content: center; +} + +.modal-chart-container { + min-height: 430px; +} diff --git a/source/portal/src/index.tsx b/source/portal/src/index.tsx index 311b89c..b8285ee 100644 --- a/source/portal/src/index.tsx +++ b/source/portal/src/index.tsx @@ -1,9 +1,10 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 /* eslint-disable @typescript-eslint/no-empty-function */ import React, { Suspense } from "react"; -import ReactDOM from "react-dom"; +import ReactDOM from "react-dom/client"; import "./index.scss"; import App from "./App"; -import * as serviceWorker from "./serviceWorker"; import "./i18n"; import { StoreContext } from "redux-react-hook"; import { makeStore } from "./store/Store"; @@ -16,18 +17,13 @@ if (process.env.NODE_ENV === "production") { const store = makeStore(); -ReactDOM.render( - // +const root = ReactDOM.createRoot( + document.getElementById("root") as HTMLElement +); +root.render( -
}> + }> - , - // , - document.getElementById("root") + ); - -// If you want your app to work offline and load faster, you can change -// unregister() to register() below. Note this comes with some pitfalls. -// Learn more about service workers: https://bit.ly/CRA-PWA -serviceWorker.unregister(); diff --git a/source/portal/src/pages/creation/Creation.scss b/source/portal/src/pages/creation/Creation.scss index 051ced2..f0005bd 100644 --- a/source/portal/src/pages/creation/Creation.scss +++ b/source/portal/src/pages/creation/Creation.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .page-breadcrumb { padding: 20px 20px 20px 40px; a, @@ -93,7 +95,7 @@ padding: 5px; } .max-tips { - padding: 4px 0 0 0; + padding: 0px 0 0 0; color: #666; text-align: right; } @@ -157,29 +159,32 @@ padding: 4px 0; } } - .st-item { - display: inline-block; - vertical-align: top; - border: 1px solid #aab7b8; - margin: 10px 10px 0px 0px; - width: 47%; - label { - width: 100%; - height: 82px; - display: inline-block; - padding: 15px; - cursor: pointer; - } - input { - margin-right: 4px; - } - .desc { - color: #687078; - padding: 5px 0 15px 16px; - } - &.active { - background-color: #f1faff; - border: 1px solid #0073bb; + .select { + display: flex; + .st-item { + // display: inline-block; + vertical-align: top; + border: 1px solid #aab7b8; + margin: 10px 10px 0px 0px; + width: 47%; + label { + width: 100%; + // height: 82px; + display: inline-block; + padding: 15px; + cursor: pointer; + } + input { + margin-right: 4px; + } + .desc { + color: #687078; + padding: 1px 0 0px 16px; + } + &.active { + background-color: #f1faff; + border: 1px solid #0073bb; + } } } } diff --git a/source/portal/src/pages/creation/StepOne.tsx b/source/portal/src/pages/creation/StepOne.tsx index 46ba51d..68c9f24 100644 --- a/source/portal/src/pages/creation/StepOne.tsx +++ b/source/portal/src/pages/creation/StepOne.tsx @@ -1,5 +1,7 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
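The index.tsx hunk above moves the portal from `ReactDOM.render` to the React 18 `createRoot` API and drops the CRA service-worker registration. The pattern in isolation, as a simplified sketch; the real render tree also appears to wrap the redux-react-hook `StoreContext` provider.

```tsx
// React 18 mount pattern adopted by index.tsx (simplified sketch; the real
// tree also wraps the redux-react-hook StoreContext provider).
import React, { Suspense } from "react";
import ReactDOM from "react-dom/client";
import App from "./App";

const root = ReactDOM.createRoot(
  document.getElementById("root") as HTMLElement
);

root.render(
  // Suspense keeps the async i18next backend from blocking first paint.
  <Suspense fallback={<div className="app-loading">Loading...</div>}>
    <App />
  </Suspense>
);
```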
+// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; -import { useHistory, useParams } from "react-router-dom"; +import { useNavigate, useParams } from "react-router-dom"; import { useDispatch } from "redux-react-hook"; import classNames from "classnames"; import { useTranslation } from "react-i18next"; @@ -28,11 +30,11 @@ import { S3_ENGINE_TYPE, } from "assets/types/index"; -const StepOne: React.FC = (props: any) => { +const StepOne: React.FC = () => { const { t } = useTranslation(); - const { engine } = useParams() as any; + const { engine, type } = useParams(); - const [taskType, setTaskType] = useState(props.match.params.type); + const [taskType, setTaskType] = useState(type); const [editionType, setEditionType] = useState(engine); const dispatch = useDispatch(); @@ -66,21 +68,16 @@ const StepOne: React.FC = (props: any) => { updateTmpTaskInfo(); }, [taskType, updateTmpTaskInfo]); - const history = useHistory(); + const navigate = useNavigate(); const goToHomePage = () => { - const toPath = "/"; - history.push({ - pathname: toPath, - }); + navigate("/"); }; const goToStepTwo = () => { let toPath = `/create/step2/${taskType}`; if (taskType === EnumTaskType.S3) { toPath = `/create/step2/${taskType}/${editionType}`; } - history.push({ - pathname: toPath, - }); + navigate(toPath); }; const changeDataType = (event: any) => { diff --git a/source/portal/src/pages/creation/comps/Step.scss b/source/portal/src/pages/creation/comps/Step.scss index 6a31bca..bf03d3c 100644 --- a/source/portal/src/pages/creation/comps/Step.scss +++ b/source/portal/src/pages/creation/comps/Step.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .step-list { padding: 0px 0 20px 0; border-bottom: 1px solid #d5dbdb; diff --git a/source/portal/src/pages/creation/comps/Step.tsx b/source/portal/src/pages/creation/comps/Step.tsx index 163018e..74ab250 100644 --- a/source/portal/src/pages/creation/comps/Step.tsx +++ b/source/portal/src/pages/creation/comps/Step.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/pages/creation/ecr/StepOneECRTips.tsx b/source/portal/src/pages/creation/ecr/StepOneECRTips.tsx index 2eb2bec..a7de26f 100644 --- a/source/portal/src/pages/creation/ecr/StepOneECRTips.tsx +++ b/source/portal/src/pages/creation/ecr/StepOneECRTips.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/pages/creation/ecr/StepThreeECR.tsx b/source/portal/src/pages/creation/ecr/StepThreeECR.tsx index 8a89c90..8510bf3 100644 --- a/source/portal/src/pages/creation/ecr/StepThreeECR.tsx +++ b/source/portal/src/pages/creation/ecr/StepThreeECR.tsx @@ -1,7 +1,9 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
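The StepOne.tsx hunk above is typical of the React Router v5 to v6 migration running through this PR: `useHistory().push({ pathname })` becomes `useNavigate()(path)`, and route params come from `useParams()` instead of `props.match.params`. A condensed before/after sketch with an invented component name:

```tsx
// Condensed before/after of the v6 migration (sketch, not repo code).
import React from "react";
import { useNavigate, useParams } from "react-router-dom";

const StepNav: React.FC = () => {
  // v5: const { engine } = useParams() as any;
  //     plus props.match.params.type for the task type
  const { engine, type } = useParams();

  // v5: const history = useHistory();
  const navigate = useNavigate();

  const goToStepTwo = () => {
    // v5: history.push({ pathname: toPath });
    navigate(`/create/step2/${type}/${engine}`);
  };

  return <button onClick={goToStepTwo}>Next</button>;
};

export default StepNav;
```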
+// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; -import { useHistory } from "react-router-dom"; +import { useNavigate } from "react-router-dom"; import { useDispatch, useMappedState } from "redux-react-hook"; -import Loader from "react-loader-spinner"; +import { ThreeDots } from "react-loader-spinner"; import { useTranslation } from "react-i18next"; import Breadcrumbs from "@material-ui/core/Breadcrumbs"; @@ -22,17 +24,22 @@ import NormalButton from "common/comp/NormalButton"; import TextButton from "common/comp/TextButton"; import "../Creation.scss"; -import "react-loader-spinner/dist/loader/css/react-spinner-loader.css"; import { ECR_PARAMS_LIST_MAP, CUR_SUPPORT_LANGS, CREATE_USE_LESS_PROPERTY, getRegionNameById, DRH_CONFIG_JSON_NAME, + YES_NO, } from "assets/config/const"; -import { ACTION_TYPE } from "assets/types"; +import { + ACTION_TYPE, + ECREnumSourceType, + EnumDockerImageType, +} from "assets/types"; import { appSyncRequestMutation } from "assets/utils/request"; import { ScheduleType } from "API"; +import cloneDeep from "lodash.clonedeep"; const mapState = (state: IState) => ({ tmpECRTaskInfo: state.tmpECRTaskInfo, @@ -60,15 +67,12 @@ const StepThreeECR: React.FC = () => { const dispatch = useDispatch(); - const history = useHistory(); + const navigate = useNavigate(); useEffect(() => { // eslint-disable-next-line no-prototype-builtins if (!tmpECRTaskInfo?.hasOwnProperty("type")) { - const toPath = "/create/step1/ECR"; - history.push({ - pathname: toPath, - }); + navigate("/create/step1/ECR"); } }, [history, tmpECRTaskInfo]); @@ -95,6 +99,10 @@ const StepThreeECR: React.FC = () => { ParameterKey: "srcList", ParameterValue: parametersObj.srcList, }); + taskParamArr.push({ + ParameterKey: "includeUntagged", + ParameterValue: parametersObj.includeUntagged, + }); taskParamArr.push({ ParameterKey: "srcImageList", ParameterValue: parametersObj.srcImageList, @@ -119,7 +127,6 @@ const StepThreeECR: React.FC = () => { ParameterKey: "destPrefix", ParameterValue: parametersObj.destPrefix, }); - taskParamArr.push({ ParameterKey: "alarmEmail", ParameterValue: parametersObj.alarmEmail, @@ -167,7 +174,7 @@ const StepThreeECR: React.FC = () => { } } - // Remove uesless property when clone task + // Remove useless property when clone task for (const key in createTaskInfo) { if (CREATE_USE_LESS_PROPERTY.indexOf(key) > -1) { delete createTaskInfo?.[key]; @@ -189,27 +196,18 @@ const StepThreeECR: React.FC = () => { type: ACTION_TYPE.SET_CREATE_TASK_FLAG, }); // Redirect to task list page - const toPath = "/task/list"; - history.push({ - pathname: toPath, - }); + navigate("/task/list"); } catch (error) { setIsCreating(false); } } const goToHomePage = () => { - const toPath = "/"; - history.push({ - pathname: toPath, - }); + navigate("/"); }; const goToStepTwo = () => { - const toPath = "/create/step2/ECR"; - history.push({ - pathname: toPath, - }); + navigate("/create/step2/ECR"); }; const goToTaskList = () => { @@ -234,6 +232,36 @@ const StepThreeECR: React.FC = () => { } }; + const renderKeyValue = (item: any) => { + return ( + <> +
+ {ECR_PARAMS_LIST_MAP[item.ParameterKey] && + ECR_PARAMS_LIST_MAP[item.ParameterKey][nameStr]} +
+ {buildParamValue(item.ParameterKey, item.ParameterValue)} +
+ + ); + }; + + const buildPreviewKeyValue = (item: any) => { + if (item.ParameterKey === "includeUntagged") { + item.ParameterValue = + item.ParameterValue === "true" ? YES_NO.NO : YES_NO.YES; + return renderKeyValue(item); + } else if (item.ParameterKey === "srcImageList") { + if (tmpECRTaskInfo?.parametersObj.srcList === EnumDockerImageType.ALL) { + return ""; + } else { + return renderKeyValue(item); + } + } else { + return renderKeyValue(item); + } + }; + return (
@@ -287,7 +315,16 @@ const StepThreeECR: React.FC = () => {
{t("creation.step3.step2TaskParams")}{" "} - ({paramsList.length - 4}) + + ( + {tmpECRTaskInfo?.parametersObj.sourceType === + ECREnumSourceType.ECR && + tmpECRTaskInfo?.parametersObj.srcList === + EnumDockerImageType.ALL + ? paramsList.length - 5 + : paramsList.length - 4} + ) +
@@ -300,25 +337,14 @@ const StepThreeECR: React.FC = () => { {t("creation.step3.step2Value")}
- {paramsList.map((element: any) => { + {cloneDeep(paramsList).map((element: any) => { return ( ECR_PARAMS_LIST_MAP[element.ParameterKey] && (
- {ECR_PARAMS_LIST_MAP[element.ParameterKey] && - ECR_PARAMS_LIST_MAP[element.ParameterKey][ - nameStr - ]} -
- {buildParamValue( - element.ParameterKey, - element.ParameterValue - )} -
+ {buildPreviewKeyValue(element)}
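The replacement row renderer above funnels every parameter through `buildPreviewKeyValue`, which rewrites `item.ParameterValue` in place for `includeUntagged`; iterating over `cloneDeep(paramsList)` keeps that mutation away from the Redux-held array. A sketch of the idea, with plain `"Yes"`/`"No"` strings standing in for the `YES_NO` constants:

```ts
// Why the hunk maps over cloneDeep(paramsList): buildPreviewKeyValue rewrites
// item.ParameterValue in place, so the preview must work on a throwaway copy
// rather than the store-held array.
import cloneDeep from "lodash.clonedeep";

type TaskParam = { ParameterKey: string; ParameterValue: string };

function toPreviewRows(paramsList: TaskParam[]): TaskParam[] {
  return cloneDeep(paramsList).map((item) => {
    if (item.ParameterKey === "includeUntagged") {
      // Stored flag answers "include untagged images?"; the preview shows
      // "only tagged images?", hence the inversion.
      item.ParameterValue = item.ParameterValue === "true" ? "No" : "Yes";
    }
    return item;
  });
}
```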
) ); @@ -338,7 +364,7 @@ const StepThreeECR: React.FC = () => { {isCreating ? ( - + ) : ( diff --git a/source/portal/src/pages/creation/ecr/StepTwoECR.tsx b/source/portal/src/pages/creation/ecr/StepTwoECR.tsx index b041f09..9bf9bcc 100644 --- a/source/portal/src/pages/creation/ecr/StepTwoECR.tsx +++ b/source/portal/src/pages/creation/ecr/StepTwoECR.tsx @@ -1,5 +1,7 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useEffect, useState } from "react"; -import { useHistory } from "react-router-dom"; +import { useNavigate } from "react-router-dom"; import { useDispatch, useMappedState } from "redux-react-hook"; import { useTranslation } from "react-i18next"; import classNames from "classnames"; @@ -51,12 +53,13 @@ const MAX_LENGTH = 4096; const ACCOUNT_REGEX = /^\d{12}$/; -const defaultTxtValue = "ubuntu:14.04,\namazon-linux:latest,\nmysql"; +const defaultTxtValue = + "ubuntu:14.04,\namazon-linux:latest,\nmysql,\npublic.ecr.aws/amaonlinux/amazonlinux:latest"; const defaultTxtValueSourceECR = - "my-ecr-repo:ALL_TAGS,\nubuntu:14.04,\namazon-linux:latest,\nmysql"; + "my-ecr-repo:ALL_TAGS,\nmy-oci-repo,\nubuntu:14.04,\namazon-linux:latest,\nmysql"; const StepTwoECR: React.FC = () => { - const history = useHistory(); + const navigate = useNavigate(); const dispatch = useDispatch(); const { tmpECRTaskInfo } = useMappedState(mapState); @@ -129,6 +132,9 @@ const StepTwoECR: React.FC = () => { const [destPrefix, setDestPrefix] = useState( tmpECRTaskInfo?.parametersObj?.destPrefix || "" ); + const [includeUntagged, setIncludeUntagged] = useState( + tmpECRTaskInfo?.parametersObj?.includeUntagged ?? "true" + ); const [description, setDescription] = useState( tmpECRTaskInfo?.parametersObj?.description ? decodeURIComponent(tmpECRTaskInfo?.parametersObj?.description) @@ -150,10 +156,7 @@ const StepTwoECR: React.FC = () => { // if the taskInfo has no taskType, redirect to Step one // eslint-disable-next-line no-prototype-builtins if (!tmpECRTaskInfo?.hasOwnProperty("type")) { - const toPath = "/create/step1/ECR"; - history.push({ - pathname: toPath, - }); + navigate("/create/step1/ECR"); } }, [history, tmpECRTaskInfo]); @@ -285,26 +288,16 @@ const StepTwoECR: React.FC = () => { }; const goToHomePage = () => { - const toPath = "/"; - history.push({ - pathname: toPath, - }); + navigate("/"); }; const goToStepOne = () => { - const toPath = "/create/step1/ECR"; - history.push({ - pathname: toPath, - }); + navigate("/create/step1/ECR"); }; const goToStepThree = () => { - console.info("tmpECRTaskInfo:", tmpECRTaskInfo); if (validateInput()) { - const toPath = "/create/step3/ECR"; - history.push({ - pathname: toPath, - }); + navigate("/create/step3/ECR"); } }; @@ -385,6 +378,11 @@ const StepTwoECR: React.FC = () => { updatetmpECRTaskInfo("destPrefix", destPrefix); }, [destPrefix]); + useEffect(() => { + // Update tmpECRTaskInfo + updatetmpECRTaskInfo("includeUntagged", includeUntagged); + }, [includeUntagged]); + useEffect(() => { // Update tmpECRTaskInfo updatetmpECRTaskInfo("description", encodeURIComponent(description)); @@ -429,7 +427,7 @@ const StepTwoECR: React.FC = () => {
{t("creation.step2ECR.selectContainerType")}
{ECR_SOURCE_TYPE.map((item: any) => { const stClass = classNames({ "st-item": true, @@ -588,6 +586,27 @@ const StepTwoECR: React.FC = () => { ); })}
+ {tmpECRTaskInfo?.parametersObj?.sourceType === + ECREnumSourceType.ECR && + tmpECRTaskInfo?.parametersObj?.srcList === + EnumDockerImageType.ALL && ( +
+ )}
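The conditional block above only offers the untagged-images choice when the source is ECR and the image list is ALL. The control's markup did not survive in this view, so here is a hedged reconstruction of the binding: a yes/no select persisted as the string `"true"`/`"false"`, reusing the DTHSelect component from earlier in this diff. The import path and option labels are assumptions.

```tsx
// Assumed shape of the elided includeUntagged control (a sketch, not the
// verbatim markup). The DTHSelect import path is a guess.
import React from "react";
import DTHSelect from "common/comp/DTHSelect";

const IncludeUntaggedControl: React.FC<{
  includeUntagged: string;
  setIncludeUntagged: (value: string) => void;
}> = ({ includeUntagged, setIncludeUntagged }) => (
  <DTHSelect
    optionList={[
      { name: "Yes", value: "false" }, // only tagged images are transferred
      { name: "No", value: "true" }, // untagged images are transferred too
    ]}
    value={includeUntagged}
    onChange={(event) => setIncludeUntagged(event.target.value)}
  />
);

export default IncludeUntaggedControl;
```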
@@ -618,7 +637,25 @@ const StepTwoECR: React.FC = () => { } rows={7} > -
{`${curLength}/${MAX_LENGTH}`}
{`${curLength}/${MAX_LENGTH}`}
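The StepTwoECR.tsx hunks above also refresh the image-list placeholders: a public-registry image may now be given by full URI, and an ECR repository can be listed bare (for OCI artifacts) or with `:ALL_TAGS`. A parsing sketch for that comma- or newline-separated list; `parseImageList` is an invented helper, not code from this PR.

```ts
// Sketch of the srcImageList entry format implied by the new placeholders:
//   "repo"          -> repository without an explicit tag (e.g. OCI artifacts)
//   "repo:tag"      -> one specific tag
//   "repo:ALL_TAGS" -> every tag in the repository
function parseImageList(raw: string): { repo: string; tag?: string }[] {
  return raw
    .split(/[,\n]/)
    .map((entry) => entry.trim())
    .filter((entry) => entry.length > 0)
    .map((entry) => {
      // Split on the last colon so registry paths such as
      // public.ecr.aws/amazonlinux/amazonlinux:latest stay intact.
      const idx = entry.lastIndexOf(":");
      return idx === -1
        ? { repo: entry }
        : { repo: entry.slice(0, idx), tag: entry.slice(idx + 1) };
    });
}
```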
diff --git a/source/portal/src/pages/creation/s3/StepOneS3Tips.tsx b/source/portal/src/pages/creation/s3/StepOneS3Tips.tsx index a870633..88da436 100644 --- a/source/portal/src/pages/creation/s3/StepOneS3Tips.tsx +++ b/source/portal/src/pages/creation/s3/StepOneS3Tips.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React from "react"; import { useTranslation } from "react-i18next"; diff --git a/source/portal/src/pages/creation/s3/StepThreeS3.tsx b/source/portal/src/pages/creation/s3/StepThreeS3.tsx index 0ce2b32..759126a 100644 --- a/source/portal/src/pages/creation/s3/StepThreeS3.tsx +++ b/source/portal/src/pages/creation/s3/StepThreeS3.tsx @@ -1,7 +1,9 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; -import { useHistory, useParams } from "react-router-dom"; +import { useNavigate, useParams } from "react-router-dom"; import { useDispatch, useMappedState } from "redux-react-hook"; -import Loader from "react-loader-spinner"; +import { ThreeDots } from "react-loader-spinner"; import { useTranslation } from "react-i18next"; import Breadcrumbs from "@material-ui/core/Breadcrumbs"; @@ -11,7 +13,7 @@ import MLink from "@material-ui/core/Link"; import { createTask as createTaskMutaion } from "graphql/mutations"; -import { IState, S3_EC2_TASK } from "store/Store"; +import { IState, S3ec2Task } from "store/Store"; import InfoBar from "common/InfoBar"; import LeftMenu from "common/LeftMenu"; @@ -23,7 +25,6 @@ import NormalButton from "common/comp/NormalButton"; import TextButton from "common/comp/TextButton"; import "../Creation.scss"; -import "react-loader-spinner/dist/loader/css/react-spinner-loader.css"; import { S3_PARAMS_LIST_MAP, CUR_SUPPORT_LANGS, @@ -54,7 +55,7 @@ const StepThreeS3: React.FC = () => { const { t, i18n } = useTranslation(); const [nameStr, setNameStr] = useState("en_name"); - const { engine } = useParams() as any; + const { engine } = useParams(); console.info("type:", engine); useEffect(() => { @@ -70,16 +71,13 @@ const StepThreeS3: React.FC = () => { const dispatch = useDispatch(); - const history = useHistory(); + const navigate = useNavigate(); useEffect(() => { // if the taskInfo has no taskType, redirect to Step one // eslint-disable-next-line no-prototype-builtins if (!tmpTaskInfo?.hasOwnProperty("type")) { - const toPath = "/create/step1/S3/ec2"; - history.push({ - pathname: toPath, - }); + navigate("/create/step1/S3/ec2"); } }, [history, tmpTaskInfo]); @@ -90,7 +88,7 @@ const StepThreeS3: React.FC = () => { ); }; - const buildEC2Params = (parametersObj: S3_EC2_TASK) => { + const buildEC2Params = (parametersObj: S3ec2Task) => { const taskParamArr: any = []; console.info("parametersObj:", parametersObj); if (!parametersObj) { @@ -309,27 +307,18 @@ const StepThreeS3: React.FC = () => { type: ACTION_TYPE.SET_CREATE_TASK_FLAG, }); // Redirect to task list page - const toPath = "/task/list"; - history.push({ - pathname: toPath, - }); + navigate("/task/list"); } catch (error) { setIsCreating(false); } } const goToHomePage = () => { - const toPath = "/"; - history.push({ - pathname: toPath, - }); + navigate("/"); }; const goToStepTwo = () => { - const toPath = `/create/step2/s3/${engine}`; - history.push({ - pathname: toPath, - }); + navigate(`/create/step2/s3/${engine}`); }; const goToTaskList = () => { @@ -472,7 +461,7 @@ const StepThreeS3: React.FC = () => { 
{isCreating ? ( - + ) : ( diff --git a/source/portal/src/pages/creation/s3/StepTwoS3.tsx b/source/portal/src/pages/creation/s3/StepTwoS3.tsx index 73a8149..78a3718 100644 --- a/source/portal/src/pages/creation/s3/StepTwoS3.tsx +++ b/source/portal/src/pages/creation/s3/StepTwoS3.tsx @@ -1,5 +1,7 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; -import { useHistory, useParams } from "react-router-dom"; +import { useNavigate, useParams } from "react-router-dom"; import { useMappedState } from "redux-react-hook"; import { useTranslation } from "react-i18next"; @@ -41,9 +43,9 @@ const mapState = (state: IState) => ({ const StepTwoS3: React.FC = () => { const { tmpTaskInfo } = useMappedState(mapState); const { t } = useTranslation(); - const { engine } = useParams() as any; + const { engine } = useParams(); - const history = useHistory(); + const navigate = useNavigate(); const [srcBucketRequiredError, setSrcBucketRequiredError] = useState(false); const [srcBucketFormatError, setSrcBucketFormatError] = useState(false); @@ -63,10 +65,7 @@ const StepTwoS3: React.FC = () => { // if the taskInfo has no taskType, redirect to Step one // eslint-disable-next-line no-prototype-builtins if (!tmpTaskInfo?.hasOwnProperty("type")) { - const toPath = "/create/step1/S3/ec2"; - history.push({ - pathname: toPath, - }); + navigate("/create/step1/S3/ec2"); } }, [tmpTaskInfo]); @@ -191,27 +190,18 @@ const StepTwoS3: React.FC = () => { // END Monitor tmpTaskInfo and hide validation error const goToHomePage = () => { - const toPath = "/"; - history.push({ - pathname: toPath, - }); + navigate("/"); }; const goToStepOne = () => { - const toPath = `/create/step1/S3/${engine}`; - history.push({ - pathname: toPath, - }); + navigate(`/create/step1/S3/${engine}`); }; const goToStepThree = () => { console.info("TO STEP THREE"); console.info("tmpTaskInfo:", tmpTaskInfo); if (validateInput()) { - const toPath = `/create/step3/s3/${engine}`; - history.push({ - pathname: toPath, - }); + navigate(`/create/step3/s3/${engine}`); } }; @@ -257,14 +247,14 @@ const StepTwoS3: React.FC = () => { {t("creation.step2.taskDetail")} = ( ) => { - setMinCapacity(event.target.value); + setWorkerNumber(event.target.value); }} - inputName="minCapacity" - inputValue={minCapacity} - placeholder="minCapacity" - /> - - -
- ) => { - setDesiredCapacity(event.target.value); - }} - inputName="desiredCapacity" - inputValue={desiredCapacity} - placeholder="desiredCapacity" + inputName="workerNumber" + inputValue={workerNumber} + placeholder="workerNumber" />
@@ -271,6 +255,46 @@ const EC2ConfigSettings: React.FC = ( {professionShow && (
+
+ + ) => { + setMinCapacity(event.target.value); + }} + inputName="minCapacity" + inputValue={minCapacity} + placeholder="minCapacity" + /> +
+ +
+ + ) => { + setDesiredCapacity(event.target.value); + }} + inputName="desiredCapacity" + inputValue={desiredCapacity} + placeholder="desiredCapacity" + /> +
+
= ( optionList={EC2_MEMORY_LIST} />
- -
- - ) => { - setWorkerNumber(event.target.value); - }} - inputName="workerNumber" - inputValue={workerNumber} - placeholder="workerNumber" - /> -
)} diff --git a/source/portal/src/pages/creation/s3/comps/LambdaConfig.tsx b/source/portal/src/pages/creation/s3/comps/LambdaConfig.tsx index 54117e7..1e87835 100644 --- a/source/portal/src/pages/creation/s3/comps/LambdaConfig.tsx +++ b/source/portal/src/pages/creation/s3/comps/LambdaConfig.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; import { useTranslation } from "react-i18next"; import { useDispatch, useMappedState } from "redux-react-hook"; diff --git a/source/portal/src/pages/creation/s3/comps/MoreSettings.tsx b/source/portal/src/pages/creation/s3/comps/MoreSettings.tsx index fcadd38..c7ad424 100644 --- a/source/portal/src/pages/creation/s3/comps/MoreSettings.tsx +++ b/source/portal/src/pages/creation/s3/comps/MoreSettings.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect, useRef } from "react"; import { useTranslation } from "react-i18next"; import { useDispatch, useMappedState } from "redux-react-hook"; diff --git a/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx b/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx index 7f4986e..5c6f4c6 100644 --- a/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx +++ b/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect, useRef } from "react"; import { useTranslation } from "react-i18next"; import { useDispatch, useMappedState } from "redux-react-hook"; @@ -135,10 +137,7 @@ const SourceSettings: React.FC = (props) => { setSrcEndpoint(""); } // Set Is Bucket In Account to No - if ( - sourceType !== EnumSourceType.S3 || - sourceType !== EnumSourceType.S3_EC2 - ) { + if (sourceType !== EnumSourceType.S3) { setSourceInAccount(YES_NO.NO); } if (sourceType === EnumSourceType.S3) { diff --git a/source/portal/src/pages/detail/Detail.scss b/source/portal/src/pages/detail/Detail.scss index 669d3b0..80d5598 100644 --- a/source/portal/src/pages/detail/Detail.scss +++ b/source/portal/src/pages/detail/Detail.scss @@ -1,3 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 .general-content { padding: 0 40px 100px 40px; .top-title-button { diff --git a/source/portal/src/pages/detail/DetailECR.tsx b/source/portal/src/pages/detail/DetailECR.tsx index 6fc9708..e87f22f 100644 --- a/source/portal/src/pages/detail/DetailECR.tsx +++ b/source/portal/src/pages/detail/DetailECR.tsx @@ -1,11 +1,14 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
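Buried in the SourceSettings.tsx hunk above is a genuine logic fix: `sourceType !== EnumSourceType.S3 || sourceType !== EnumSourceType.S3_EC2` holds for every value, since no value can equal both at once, so the "bucket in this account" flag was being reset on each source-type change. The corrected guard in isolation; the enum values here are illustrative, not the repo's:

```ts
// Distilled version of the fix (enum values are illustrative). With `!==`
// joined by `||`, at least one branch always holds, so the old guard reset
// the in-account flag unconditionally.
enum EnumSourceType {
  S3 = "S3",
  S3_EC2 = "S3_EC2",
}

function shouldResetInAccountFlag(sourceType: EnumSourceType): boolean {
  // before: sourceType !== EnumSourceType.S3 || sourceType !== EnumSourceType.S3_EC2
  // (a tautology)
  return sourceType !== EnumSourceType.S3;
}
```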
+// SPDX-License-Identifier: Apache-2.0 import React, { useState, useEffect } from "react"; import Breadcrumbs from "@material-ui/core/Breadcrumbs"; import NavigateNextIcon from "@material-ui/icons/NavigateNext"; import Typography from "@material-ui/core/Typography"; import MLink from "@material-ui/core/Link"; -import Loader from "react-loader-spinner"; +import { ThreeDots } from "react-loader-spinner"; import { useTranslation } from "react-i18next"; +import { useParams } from "react-router-dom"; import Loading from "common/Loading"; import { withStyles, Theme, createStyles } from "@material-ui/core/styles"; @@ -34,13 +37,14 @@ import { EnumDockerImageType, } from "assets/types/index"; -import { getRegionNameById } from "assets/config/const"; +import { YES_NO, getRegionNameById } from "assets/config/const"; import "./Detail.scss"; import { appSyncRequestMutation, appSyncRequestQuery, } from "assets/utils/request"; +import { formatLocalTime } from "assets/utils/utils"; interface StyledTabProps { label: string; @@ -106,8 +110,9 @@ function TabPanel(props: TabPanelProps) { ); } -const Detail: React.FC = (props: any) => { +const Detail: React.FC = () => { const { t } = useTranslation(); + const { id } = useParams(); const [value, setValue] = useState(0); const [isLoading, setIsLoading] = useState(true); @@ -137,8 +142,10 @@ const Detail: React.FC = (props: any) => { } useEffect(() => { - fetchNotes(props.match.params.id); - }, []); + if (id) { + fetchNotes(id); + } + }, [id]); const handleChange: any = (event: React.ChangeEvent, newValue: number) => { setValue(newValue); @@ -152,13 +159,27 @@ const Detail: React.FC = (props: any) => { }); setIsStopLoading(false); setOpen(false); - fetchNotes(props.match.params.id); + fetchNotes(id ?? ""); console.info(stopResData); } catch (error) { setIsLoading(false); } } + const buildAllImageDisplay = () => { + return ( + <> +
{t("taskDetail.images")}
ALL
{t("creation.step2ECR.onlyTag")}
+ {curTaskInfo.includeUntagged === "false" ? YES_NO.YES : YES_NO.NO} +
+ + ); + }; + const handleClose = () => { setOpen(false); }; @@ -168,7 +189,7 @@ const Detail: React.FC = (props: any) => { }; const confirmStopTask = () => { - stopTaskFunc(props.match.params.id); + stopTaskFunc(id ?? ""); }; return ( @@ -194,7 +215,7 @@ const Detail: React.FC = (props: any) => { {isStopLoading ? ( - + ) : ( {
{t("taskDetail.createdAt")}
- {/*
-   {curTaskInfo.createdAt}
- */}
{formatLocalTime(curTaskInfo.createdAt)}
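DetailECR.tsx now renders `formatLocalTime(curTaskInfo.createdAt)` instead of the previously commented-out raw timestamp. The helper lives in `assets/utils/utils`, which this diff does not show, so the following is only a plausible contract, assuming `createdAt` is an ISO-8601 UTC string:

```ts
// Hypothetical contract for formatLocalTime; the real helper is not part of
// this diff. Renders a UTC timestamp in the browser's local time zone.
export function formatLocalTime(utcTime: string): string {
  if (!utcTime) {
    return "-";
  }
  const date = new Date(utcTime);
  // Fall back to the raw value if the timestamp cannot be parsed.
  return Number.isNaN(date.getTime()) ? utcTime : date.toLocaleString();
}
```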
@@ -357,7 +374,7 @@ const Detail: React.FC = (props: any) => {
{curTaskInfo.srcList === EnumDockerImageType.ALL ? ( - "ALL" + buildAllImageDisplay() ) : (