diff --git a/CHANGELOG.md b/CHANGELOG.md index 09dc3fa..9152d81 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [2.3.1] - 2023-04 +## [2.4.0] - 2023-04-28 +### Added +- Support for requester pay mode in S3 transfer task. + +## [2.3.1] - 2023-04-18 ### Fixed - Fix deployment failure due to S3 ACL changes. diff --git a/README.md b/README.md index 453e792..17f405c 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,8 @@ gcr.io, Red Hat Quay.io) to Amazon ECR. ![](docs/images/homepage.png) +You will be responsible for your compliance with all applicable laws in respect of your data transfer tasks. + ## Features - [x] Authentication (Cognito User Pool, OIDC) @@ -90,8 +92,8 @@ Create your first data transfer task, For the complete user guide, refer to ## FAQ -**Q. Which are the supported Reigons of this solution?**
-You can deploy this solution in these Reigons: N.Virginia (us-east-1), Ohio (us-east-2), N.California (us-west-1), +**Q. Which are the supported Regions of this solution?**
+You can deploy this solution in these Regions: N.Virginia (us-east-1), Ohio (us-east-2), N.California (us-west-1), Oregon (us-west-2), Mumbai (ap-south-1), Seoul (ap-northeast-2), Singapore (ap-southeast-1), Sydney (ap-southeast-2), Tokyo (ap-northeast-1), Canada (ca-central-1), Frankfurt (eu-central-1), Ireland (eu-west-1), London (eu-west-2), Paris (eu-west-3), Stockholm (eu-north-1), São Paulo (sa-east-1), Beijing (cn-north-1), Ningxia (cn-northwest-1). diff --git a/docs/S3-SSE-KMS-Policy.md b/docs/S3-SSE-KMS-Policy.md index 580dacb..1305603 100644 --- a/docs/S3-SSE-KMS-Policy.md +++ b/docs/S3-SSE-KMS-Policy.md @@ -37,7 +37,7 @@ _Note_: If it's for S3 buckets in China regions, please make sure you also chang "kms:DescribeKey" ], "Resource": [ - "arn:aws:kms:us-west-2:123456789012:key/f5cd8cb7-476c-4322-ac9b-0c94a687700d " + "arn:aws:kms:us-west-2:111122223333:key/f5cd8cb7-476c-4322-ac9b-0c94a687700d " ] } ] diff --git a/docs/S3-SSE-KMS-Policy_CN.md b/docs/S3-SSE-KMS-Policy_CN.md index 1765fe9..79ecf30 100644 --- a/docs/S3-SSE-KMS-Policy_CN.md +++ b/docs/S3-SSE-KMS-Policy_CN.md @@ -37,7 +37,7 @@ _注意_:如果是针对中国地区的 S3 存储桶,请确保您也更改 "kms:DescribeKey" ], "Resource": [ - "arn:aws:kms:us-west-2:123456789012:key/f5cd8cb7-476c-4322-ac9b-0c94a687700d " + "arn:aws:kms:us-west-2:111122223333:key/f5cd8cb7-476c-4322-ac9b-0c94a687700d " ] } ] diff --git a/docs/en-base/additional-resources.md b/docs/en-base/additional-resources.md deleted file mode 100644 index 256af22..0000000 --- a/docs/en-base/additional-resources.md +++ /dev/null @@ -1,12 +0,0 @@ -- [AWS CloudFormation](https://aws.amazon.com/cloudformation/) -- [Amazon S3](https://aws.amazon.com/s3/) -- [AWS Lambda](https://aws.amazon.com/lambda/) -- [AWS Step Functions](https://aws.amazon.com/step-functions/) -- [Amazon CloudFront](https://aws.amazon.com/cloudfront/) -- [Amazon ECR](https://aws.amazon.com/ecr/) -- [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) -- [AWS AppSync](https://aws.amazon.com/appsync/) -- [Amazon Cognito](https://aws.amazon.com/cognito/) -- [AWS IAM](https://aws.amazon.com/iam/) -- [Amazon EC2](https://aws.amazon.com/ec2/) -- [Amazon Route 53](https://aws.amazon.com/route53/) \ No newline at end of file diff --git a/docs/en-base/architecture-overview/architecture-details.md b/docs/en-base/architecture-overview/architecture-details.md new file mode 100644 index 0000000..35d8d5f --- /dev/null +++ b/docs/en-base/architecture-overview/architecture-details.md @@ -0,0 +1,55 @@ +This section describes the components and AWS services that make up this solution and the architecture details on how these components work together. + + +## AWS services in this solution + +The following AWS services are included in this solution: + +| AWS service | Description | +| --- | --- | +| [Amazon CloudFront](https://aws.amazon.com/cloudfront/) | **Core**. To made available the static web assets (frontend user interface). | +| [AWS AppSync](https://aws.amazon.com/appsync/) | **Core**. To provide the backend APIs. | +| [AWS Lambda](https://aws.amazon.com/lambda/) | **Core**. To call backend APIs. | +| [Amazon ECS](https://aws.amazon.com/ecs/) | **Core**.  To run the container images used by the plugin template. | +| [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) | **Core**.  To store a record with transfer status for each object. | +| [Amazon EC2](https://aws.amazon.com/ec2/) | **Core**. To consume the messages in Amazon SQS and transfer the object from the source bucket to the destination bucket. 
| +| [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) | **Core**. Stores the credential for data transfer. | +| [AWS Step Functions](https://aws.amazon.com/step-functions/) | **Supporting**. To start or stop/delete the ECR or S3 plugin template. | +| [Amazon S3](https://aws.amazon.com/s3/) | **Supporting**. To store the static web assets (frontend user interface). | +| [Amazon Cognito](https://aws.amazon.com/cognito/) | **Supporting**. To authenticate users (in AWS Regions). | +| [Amazon ECR](https://aws.amazon.com/ecr/) | **Supporting**. To host the container images. | +| [Amazon SQS](https://aws.amazon.com/sqs/) | **Supporting**. To store the transfer tasks temporarily as a buffer. | +| [Amazon EventBridge](https://aws.amazon.com/eventbridge/) | **Supporting**. To invoke the transfer tasks regularly. | +| [Amazon SNS](https://aws.amazon.com/sns/) | **Supporting**. Provides topic and email subscription notifications for data transfer results. | +| [AWS CloudWatch](https://aws.amazon.com/cloudwatch/) | **Supporting**. To monitor the data transfer progress. | + +## How Data Transfer Hub works + +This solution has three components: a web console, the Amazon S3 transfer engine, and the Amazon ECR transfer engine. + +### Web console +This solution provides a simple web console which allows you to create and manage transfer tasks for Amazon S3 and Amazon ECR. + +### Amazon S3 transfer engine +Amazon S3 transfer engine runs the Amazon S3 plugin and is used for transferring objects from their sources into S3 buckets. The S3 plugin supports the following features: + +- Transfer Amazon S3 objects between AWS China Regions and AWS Regions +- Transfer objects from Alibaba Cloud OSS / Tencent COS / Qiniu Kodo to Amazon S3 +- Transfer objects from S3 Compatible Storage service to Amazon S3 +- Support near real time transfer via S3 Event +- Support transfer with object metadata +- Support incremental data transfer +- Support transfer from private payer request bucket +- Auto retry and error handling + +### Amazon ECR transfer engine + +Amazon ECR engine runs the Amazon ECR plugin and is used for transferring container images from other container registries. The ECR plugin supports the following features: + +- Transfer Amazon ECR images between AWS China Regions and AWS Regions +- Transfer from public container registry (such as Docker Hub, GCR.io, Quay.io) to Amazon ECR +- Transfer selected images to Amazon ECR +- Transfer all images and tags from Amazon ECR +The ECR plugin leverages [skopeo][skopeo] for the underlying engine. The AWS Lambda function lists images in their sources and uses Fargate to run the transfer jobs. + +[skopeo]: https://github.com/containers/skopeo \ No newline at end of file diff --git a/docs/en-base/architecture.md b/docs/en-base/architecture-overview/architecture.md similarity index 62% rename from docs/en-base/architecture.md rename to docs/en-base/architecture-overview/architecture.md index aa78094..7e1965a 100644 --- a/docs/en-base/architecture.md +++ b/docs/en-base/architecture-overview/architecture.md @@ -1,16 +1,15 @@ -Deploying the Data Transfer Hub solution with the default parameters builds the following environment in the AWS Cloud. +Deploying this solution with the default parameters builds the following environment in the AWS Cloud. 
-![architecture](./images/arch-global.png) - -Figure 1: Data Transfer Hub architecture +![architecture](../images/arch-global.png) +*Data Transfer Hub architecture* -The solution automatically deploys and configures a serverless architecture with the following services: +This solution deploys the Amazon CloudFormation template in your AWS Cloud account and completes the following settings. 1. The solution’s static web assets (frontend user interface) are stored in [Amazon S3][s3] and made available through [Amazon CloudFront][cloudfront]. 2. The backend APIs are provided via [AWS AppSync][appsync] GraphQL. 3. Users are authenticated by either [Amazon Cognito][cognito] User Pool (in AWS Regions) or by an OpenID connect provider (in AWS China Regions) such as [Authing](https://www.authing.cn/), [Auth0](https://auth0.com/), etc. 4. AWS AppSync runs [AWS Lambda][lambda] to call backend APIs. -5. Lambda starts an [AWS Step Functions][stepfunction] workflow that uses [AWS CloudFormation][cloudformation] to start or stop/delete the ECR or S3 plugin template. +5. Lambda starts an [AWS Step Functions][stepfunction] workflow that uses [AWS CloudFormation][cloudformation] to start or stop/delete the Amazon ECR or Amazon S3 plugin template. 6. The plugin templates are hosted in a centralized Amazon S3 bucket manged by AWS. 7. The solution also provisions an [Amazon ECS][ecs] cluster that runs the container images used by the plugin template, and the container images are hosted in [Amazon ECR][ecr]. 8. The data transfer task information is stored in in [Amazon DynamoDB][dynamodb]. @@ -20,27 +19,27 @@ After deploying the solution, you can use [AWS WAF][waf] to protect CloudFront o !!! note "Important" If you deploy this solution in AWS (Beijing) Region operated by Beijing Sinnet Technology Co., Ltd. (Sinnet), or the AWS (Ningxia) Region operated by Ningxia Western Cloud Data Technology Co., Ltd. ( ), you are required to provide a domain with ICP Recordal before you can access the web console. + The web console is a centralized place to create and manage all data transfer jobs. Each data type (for example, Amazon S3 or Amazon ECR) is a plugin for Data Transfer Hub, and is packaged as an AWS CloudFormation template hosted in an S3 bucket that AWS owns. When the you create a transfer task, an AWS Lambda function initiates the Amazon CloudFormation template, and state of each task is stored and displayed in the DynamoDB tables. -As of March 2023, the solution supports two data transfer plugins: an Amazon S3 plugin and an Amazon ECR plugin. +As of April 2023, the solution supports two data transfer plugins: an Amazon S3 plugin and an Amazon ECR plugin. ## Amazon S3 plugin -![s3-architecture](./images/s3-arch-global.png) - -Figure 2: Data Transfer Hub Amazon S3 plugin architecture +![s3-architecture](../images/s3-arch-global.png) +*Data Transfer Hub Amazon S3 plugin architecture* The Amazon S3 plugin runs the following workflows: 1. A time-based Event Bridge rule triggers a AWS Lambda function on an hourly basis. -2. AWS Lambda uses the launch template to launch a data comparison job (JobFinder) in an EC2. +2. AWS Lambda uses the launch template to launch a data comparison job (JobFinder) in an [Amazon Elastic Compute Cloud (Amazon EC2)][ec2]. 3. The job lists all the objects in the source and destination buckets, makes comparisons among objects and determines which objects should be transferred. -3. EC2 sends a message for each object that will be transferred to Amazon Simple Queue Service (Amazon SQS). 
Amazon S3 event messages can also be supported for more real-time data transfer; whenever there is object uploaded to source bucket, the event message is sent to the same SQS queue. -4. A JobWorker running in EC2 consumes the messages in SQS and transfers the object from the source bucket to the destination bucket. You can use an Auto Scaling Group to control the number of EC2 instances to transfer the data based on business need. -5. A record with transfer status for each object is stored in Amazon DynamoDB. -6. The Amazon EC2 instance will get (download) the object from the source bucket based on the SQS message. -7. The EC2 instance will put (upload) the object to the destination bucket based on the SQS message. +4. Amazon EC2 sends a message for each object that will be transferred to [Amazon Simple Queue Service (Amazon SQS)][sqs]. Amazon S3 event messages can also be supported for more real-time data transfer; whenever there is object uploaded to source bucket, the event message is sent to the same Amazon SQS queue. +5. A JobWorker running in Amazon EC2 consumes the messages in SQS and transfers the object from the source bucket to the destination bucket. You can use an Auto Scaling Group to control the number of EC2 instances to transfer the data based on business need. +6. A record with transfer status for each object is stored in Amazon DynamoDB. +7. The Amazon EC2 instance will get (download) the object from the source bucket based on the Amazon SQS message. +8. The Amazon EC2 instance will put (upload) the object to the destination bucket based on the Amazon SQS message. !!! note "Note" @@ -48,9 +47,8 @@ buckets, makes comparisons among objects and determines which objects should be ## Amazon ECR plugin -![ecr-architecture](./images/ecr-arch-global.png) - -Figure 3: Data Transfer Hub Amazon ECR plugin architecture +![ecr-architecture](../images/ecr-arch-global.png) +*Data Transfer Hub Amazon ECR plugin architecture* The Amazon ECR plugin runs the following workflows: @@ -62,14 +60,17 @@ The Amazon ECR plugin runs the following workflows: 6. After the copy completes, the status (either success or fail) is logged into DynamoDB for tracking purpose. 
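To make the JobWorker behavior described in steps 5 to 8 of the Amazon S3 plugin workflow more concrete, the following minimal sketch shows the same consume, download, upload, and record loop with boto3. The queue URL, bucket, table, and message fields are illustrative placeholders only; the actual worker is the program that the solution provisions on the EC2 instances in the Auto Scaling group.

```python
import json
import boto3

sqs = boto3.client("sqs")
s3 = boto3.client("s3")
status_table = boto3.resource("dynamodb").Table("dth-transfer-status")  # placeholder table name

QUEUE_URL = "https://sqs.us-west-2.amazonaws.com/111122223333/dth-transfer-queue"  # placeholder
DEST_BUCKET = "destination-bucket"  # placeholder


def poll_once():
    # Step 5: consume one transfer message from the Amazon SQS queue.
    resp = sqs.receive_message(QueueUrl=QUEUE_URL, MaxNumberOfMessages=1, WaitTimeSeconds=20)
    for msg in resp.get("Messages", []):
        job = json.loads(msg["Body"])  # assumed message shape: {"srcBucket": "...", "key": "..."}

        # Step 7: get (download) the object from the source bucket based on the message.
        obj = s3.get_object(Bucket=job["srcBucket"], Key=job["key"])

        # Step 8: put (upload) the object to the destination bucket.
        s3.put_object(Bucket=DEST_BUCKET, Key=job["key"], Body=obj["Body"].read())

        # Step 6: store a transfer status record for this object in Amazon DynamoDB.
        status_table.put_item(Item={"ObjectKey": job["key"], "Status": "Done"})

        # Delete the message so it is not processed again.
        sqs.delete_message(QueueUrl=QUEUE_URL, ReceiptHandle=msg["ReceiptHandle"])
```

The worker provided by the solution also adds retries, error handling, and scaling through the Auto Scaling group described above; this sketch only illustrates the control flow.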
-[s3]:https://www.amazonaws.cn/s3/?nc1=h_ls -[cloudfront]:https://www.amazonaws.cn/cloudfront/?nc1=h_ls -[appsync]:https://www.amazonaws.cn/appsync/?nc1=h_ls -[cognito]:https://www.amazonaws.cn/cognito/?nc1=h_ls -[lambda]:https://www.amazonaws.cn/lambda/?nc1=h_ls -[stepfunction]:https://www.amazonaws.cn/step-functions/?nc1=h_ls -[cloudformation]:https://aws.amazon.com/cn/cloudformation/ -[ecs]:https://aws.amazon.com/cn/ecs/ -[ecr]:https://aws.amazon.com/cn/ecr/ -[dynamodb]:https://www.amazonaws.cn/dynamodb/?nc1=h_ls -[waf]:https://aws.amazon.com/waf/ \ No newline at end of file + +[s3]:https://aws.amazon.com/s3/ +[cloudfront]:https://aws.amazon.com/cloudfront/ +[appsync]:https://aws.amazon.com/appsync/ +[cognito]:https://aws.amazon.com/cognito/ +[lambda]:https://aws.amazon.com/lambda/ +[stepfunction]:https://aws.amazon.com/step-functions/ +[cloudformation]:https://aws.amazon.com/cloudformation/ +[ecs]:https://aws.amazon.com/ecs/ +[ecr]:https://aws.amazon.com/ecr/ +[dynamodb]:https://aws.amazon.com/dynamodb/ +[waf]:https://aws.amazon.com/waf/ +[ec2]:https://aws.amazon.com/ec2/ +[sqs]:https://aws.amazon.com/sqs/ \ No newline at end of file diff --git a/docs/en-base/architecture-overview/design-considerations.md b/docs/en-base/architecture-overview/design-considerations.md new file mode 100644 index 0000000..c6145bd --- /dev/null +++ b/docs/en-base/architecture-overview/design-considerations.md @@ -0,0 +1,46 @@ +This solution was designed with best practices from the [AWS Well-Architected Framework][well-architected-framework] which helps customers design and operate reliable, secure, efficient, and cost-effective workloads in the cloud. + +This section describes how the design principles and best practices of the Well-Architected Framework were applied when building this solution. + +## Operational excellence +This section describes how the principles and best practices of the [operational excellence pillar][operational-excellence-pillar] were applied when designing this solution. + +The Data Transfer Hub solution pushes metrics to Amazon CloudWatch at various stages to provide observability into the infrastructure, Lambda functions, Amazon EC2 transfer workers, Step Function workflow and the rest of the solution components. Data transferring errors are added to the Amazon SQS queue for retries and alerts. + +## Security +This section describes how the principles and best practices of the [security pillar][security-pillar] were applied when designing this solution. + +- Data Transfer Hub web console users are authenticated and authorized with Amazon Cognito. +- All inter-service communications use AWS IAM roles. +- All roles used by the solution follows least-privilege access. That is, it only contains minimum permissions required so the service can function properly. + +## Reliability +This section describes how the principles and best practices of the [reliability pillar][reliability-pillar] were applied when designing this solution. + +- Using AWS serverless services wherever possible (for example, Lambda, Step Functions, Amazon S3, and Amazon SQS) to ensure high availability and recovery from service failure. +- Data is stored in DynamoDB and Amazon S3, so it persists in multiple Availability Zones (AZs) by default. + +## Performance efficiency +This section describes how the principles and best practices of the [performance efficiency pillar][performance-efficiency-pillar] were applied when designing this solution. 
+ +- The ability to launch this solution in any Region that supports AWS services in this solution such as: AWS Lambda, AWS S3, Amazon SQS, Amazon DynamoDB, and Amazon EC2. +- Automatically testing and deploying this solution daily. Reviewing this solution by solution architects and subject matter experts for areas to experiment and improve. + +## Cost optimization +This section describes how the principles and best practices of the [cost optimization pillar][cost-optimization-pillar] were applied when designing this solution. + +- Use Autoscaling Group so that the compute costs are only related to how much data is transferred. +- Using serverless services such as Amazon SQS and DynamoDB so that customers only get charged for what they use. + +## Sustainability +This section describes how the principles and best practices of the [sustainability pillar][sustainability-pillar] were applied when designing this solution. + +- The solution‘s serverless design (using Lambda, Amazon SQS and DynamoDB) and the use of managed services (such as Amazon EC2) are aimed at reducing carbon footprint compared to the footprint of continually operating on-premises servers. + +[well-architected-framework]:https://aws.amazon.com/architecture/well-architected/?wa-lens-whitepapers.sort-by=item.additionalFields.sortDate&wa-lens-whitepapers.sort-order=desc&wa-guidance-whitepapers.sort-by=item.additionalFields.sortDate&wa-guidance-whitepapers.sort-order=desc +[operational-excellence-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/operational-excellence-pillar/welcome.html +[security-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/welcome.html +[reliability-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/welcome.html +[performance-efficiency-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/performance-efficiency-pillar/welcome.html +[cost-optimization-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/cost-optimization-pillar/welcome.html +[sustainability-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/sustainability-pillar/sustainability-pillar.html \ No newline at end of file diff --git a/docs/en-base/contributors.md b/docs/en-base/contributors.md new file mode 100644 index 0000000..44aff0a --- /dev/null +++ b/docs/en-base/contributors.md @@ -0,0 +1,7 @@ +- Aiden Dai +- Eva Liu +- Kervin Hu +- Haiyun Chen +- Joe Shi +- Ashwini Rudra +- Jyoti Tyagi diff --git a/docs/en-base/deployment/deployment-overview.md b/docs/en-base/deployment/deployment-overview.md new file mode 100644 index 0000000..3bc5a26 --- /dev/null +++ b/docs/en-base/deployment/deployment-overview.md @@ -0,0 +1,13 @@ +Use the following steps to deploy this solution on AWS. For detailed instructions, follow the links for each step. + +Before you launch the solution, [review the cost](../../plan-deployment/cost), architecture, network security, and other considerations discussed in this guide. Follow the step-by-step instructions in this section to configure and deploy the solution into your account. + + +**Time to deploy**: Approximately 15 minutes + +- Step 1. Launch the stack + - [(Option 1) Deploy the AWS CloudFormation template in AWS Regions](../deployment/#launch-cognito) + - [(Option 2) Deploy the AWS CloudFormation template in AWS China Regions](../deployment/#launch-openid) + +- Step 2. [Launch the web console](../deployment/#launch-web-console) +- Step 3. 
[Create a transfer task](../deployment/#create-task) \ No newline at end of file diff --git a/docs/en-base/deployment.md b/docs/en-base/deployment/deployment.md similarity index 81% rename from docs/en-base/deployment.md rename to docs/en-base/deployment/deployment.md index d329511..cad72ab 100644 --- a/docs/en-base/deployment.md +++ b/docs/en-base/deployment/deployment.md @@ -1,17 +1,3 @@ -Before you launch the solution, review the cost, architecture, network security, and other considerations discussed in this guide. Follow the step-by-step instructions in this section to configure and deploy the solution into your account. - -**Time to deploy**: Approximately 15 minutes -## Deployment overview - -Use the following steps to deploy this solution on AWS. For detailed instructions, follow the links for each step. - -- Step 1. Launch the stack - - [(Option 1) Deploy the AWS CloudFormation template in AWS Regions](#launch-cognito) - - [(Option 2) Deploy the AWS CloudFormation template in AWS China Regions](#launch-openid) -- Step 2. [Launch the web console](#launch-web-console) -- Step 3. [Create a transfer task](#create-task) - - ## Step 1. (Option 1) Launch the stack in AWS Regions !!! important "Important" @@ -25,13 +11,13 @@ Use the following steps to deploy this solution on AWS. For detailed instruction 1. Sign in to the AWS Management Console and select the button to launch the `DataTransferHub-cognito.template` AWS CloudFormation template. Alternatively, you can [download the template](https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template) as a starting point for your own implementation. - [![Launch Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template) + [![Launch Stack](../images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template) 2. The template launches in the US East (N. Virginia) Region by default. To launch the solution in a different AWS Region, use the Region selector in the console navigation bar. 3. On the **Create stack** page, verify that the correct template URL is in the **Amazon S3 URL** text box and choose **Next**. -4. On the **Specify stack details** page, assign a name to your solution stack. For information about naming character limitations, refer to [IAM and STS Limits](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) in the *AWS Identity and Access Management User Guide*. +4. On the **Specify stack details** page, assign a name to your solution stack. For information about naming character limitations, refer to [IAM and and AWS STS quotas](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) in the *AWS Identity and Access Management User Guide*. 5. Under **Parameters**, review the parameters for this solution template and modify them as necessary. This solution uses the following default values. @@ -70,13 +56,13 @@ In AWS Regions where Amazon Cognito is not yet available, you can use OIDC to pr 5. Click the **Create** button. 6. Enter the **Application Name**, and **Subdomain**. 7. 
Save the `App ID` (that is, `client_id`) and `Issuer` to a text file from Endpoint Information, which will be used later. - [![](./images/OIDC/endpoint-info.png)](./images/OIDC/endpoint-info.png) + [![](../images/OIDC/endpoint-info.png)](../images/OIDC/endpoint-info.png) 8. Update the `Login Callback URL` and `Logout Callback URL` to your IPC recorded domain name. - [![](./images/OIDC/authentication-configuration.png)](./images/OIDC/authentication-configuration.png) + [![](../images/OIDC/authentication-configuration.png)](../images/OIDC/authentication-configuration.png) 9. Set the Authorization Configuration. - [![](./images/OIDC/authorization-configuration.png)](./images/OIDC/authorization-configuration.png) + [![](../images/OIDC/authorization-configuration.png)](../images/OIDC/authorization-configuration.png) 10. Update login control. 1. Select and enter the **Application** interface from the left sidebar, select **Login Control**, and then select **Registration and Login**. 2. Please select only **Password Login: Email** for the login method. @@ -105,7 +91,7 @@ The following is an example for configuring an Amazon Route 53. 1. From the hosted zone, choose **Create Record**. 1. In the **Record name** input box, enter the host name. 1. From **Record type** select **CNAME**. - 1. In the value field, Enter the CloudFormation output PortalUrl. + 1. In the value field, enter the CloudFormation output PortalUrl. 1. Select **Create records**. 3. Add alternative domain names to the CloudFront distribution. @@ -123,13 +109,13 @@ This automated AWS CloudFormation template deploys Data Transfer Hub in the AWS 1. Sign in to the AWS Management Console and select the button to launch the `DataTransferHub-openid.template` AWS CloudFormation template. Alternatively, you can [download the template](https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template) as a starting point for your own implementation. - [![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template) + [![Launch Stack](../images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template) 2. The template launches in your console’s default Region. To launch the solution in a different AWS Region, use the Region selector in the console navigation bar. 3. On the **Create stack** page, verify that the correct template URL is in the Amazon S3 URL text box and choose **Next**. -4. On the **Specify stack details** page, assign a name to your solution stack. For information about naming character limitations, refer to [IAM and STS Limits](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) in the *AWS Identity and Access Management User Guide*. +4. On the **Specify stack details** page, assign a name to your solution stack. For information about naming character limitations, refer to [IAM and AWS STS quotas](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) in the *AWS Identity and Access Management User Guide*. 5. Under **Parameters**, review the parameters for this solution template and modify them as necessary. This solution uses the following default values. 
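If you prefer to script this step instead of using the console, you can also create the stack with the AWS SDK. The following sketch is illustrative only: it uses the template URL from step 1, and the parameter keys shown are placeholders, so replace them with the exact keys and values listed in the parameter table for this template.

```python
import boto3

# Illustrative sketch: launch the OpenID template in an AWS China Region.
cfn = boto3.client("cloudformation", region_name="cn-north-1")

response = cfn.create_stack(
    StackName="DataTransferHub",
    TemplateURL=(
        "https://s3.amazonaws.com/solutions-reference/"
        "data-transfer-hub/latest/DataTransferHub-openid.template"
    ),
    Parameters=[
        # Placeholder keys; use the parameter keys defined by the template.
        {"ParameterKey": "OidcProvider", "ParameterValue": "https://<your-domain>.authing.cn/oidc"},
        {"ParameterKey": "OidcClientId", "ParameterValue": "<your App ID>"},
    ],
    Capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
)
print(response["StackId"])
```

The stack status can then be followed in the CloudFormation console, just as with a console-initiated deployment.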
@@ -178,10 +164,10 @@ Depending on the region where you start the stack, you can choose to access the ## Step 3. Create a transfer task -Use the web console to create a transfer task for Amazon S3 or Amazon ECR. For more information, refer to [Create Amazon S3 Transfer Task](./tutorial-s3.md) and [Create Amazon ECR Transfer Task](./tutorial-ecr.md). +Use the web console to create a transfer task for Amazon S3 or Amazon ECR. For more information, refer to [Create Amazon S3 Transfer Task](../user-guide/tutorial-s3.md) and [Create Amazon ECR Transfer Task](../user-guide/tutorial-ecr.md). -![dth-console](./images/dth-console.png) +![dth-console](../images/dth-console.png) -Figure 1: Data Transfer Hub web console +*Data Transfer Hub web console* [icp]: https://www.amazonaws.cn/en/support/icp/?nc2=h_l2_su \ No newline at end of file diff --git a/docs/en-base/template.md b/docs/en-base/deployment/template.md similarity index 100% rename from docs/en-base/template.md rename to docs/en-base/deployment/template.md diff --git a/docs/en-base/source.md b/docs/en-base/developer-guide/source.md similarity index 100% rename from docs/en-base/source.md rename to docs/en-base/developer-guide/source.md diff --git a/docs/en-base/faq.md b/docs/en-base/faq.md index 3e3910a..bfeeccb 100644 --- a/docs/en-base/faq.md +++ b/docs/en-base/faq.md @@ -1,12 +1,12 @@ -The following are common issues you may face in deploying and using the solution. +The following are common questions you might have when deploying and using the solution. ## Deployment **1. In which AWS Regions can this solution be deployed?**
-For the list of supported regions, refer to [supported regions](./regions.md). +For the list of supported regions, refer to [supported regions](../plan-deployment/regions). -**2. When creating a transfer task, shall I deploy it on the data source side or the destination side?**
+**2. When creating a transfer task, should I deploy it on the data source side or the destination side?**
The transfer performance of the solution will not be affected by whether the deployment is on the data source or destination side. @@ -35,7 +35,7 @@ Not supported currently. For this scenario, we recommend using Amazon S3's [Cros **7. Can I use AWS CLI to create a DTH S3 Transfer Task?**
-Yes. Please refer to the tutorial [Using AWS CLI to launch DTH S3 Transfer task](./tutorial-cli-launch.md). +Yes. Please refer to the tutorial [Using AWS CLI to launch DTH S3 Transfer task](../user-guide/tutorial-cli-launch). ## Performance @@ -89,7 +89,7 @@ By authentication through the Access Keyid and Access Key of the other party’s **5. Does the solution support SSE-S3, SSE-KMS, and SSE-CMK?**
-Yes. The solution supports the use of SSE-S3 and SSE-KMS data sources. If your source bucket has SSE-CMK enabled, refer to the [tutorial](../tutorial-s3/#how-to-transfer-s3-object-from-kms-encrypted-amazon-s3). +Yes. The solution supports the use of SSE-S3 and SSE-KMS data sources. If your source bucket has SSE-CMK enabled, refer to the [tutorial](../user-guide/tutorial-s3/#how-to-transfer-s3-object-from-kms-encrypted-amazon-s3). ## Features @@ -126,7 +126,7 @@ At **Task Scheduling Settings**, you can make the task scheduling configuration. **4. Is it possible for real-time synchronization of newly added files?**
-Near-real-time synchronization can be achieved, only if the Data Transfer Hub is deployed in the same AWS account and the same region as the data source. If the data source and the solution are not in the same account, you can configure it manually. For more information, refer to the [tutorial](https://github.com/awslabs/data-transfer-hub/blob/v2.0.0/docs/s3-event-trigger-config.md). +Near-real-time synchronization can be achieved, only if the Data Transfer Hub is deployed in the same AWS account and the same region as the data source. If the data source and the solution are not in the same account, you can configure it manually. For more information, refer to the [tutorial](https://github.com/awslabs/data-transfer-hub/blob/main/docs/s3-event-trigger-config.md). **5. Are there restrictions on the number of files and the size of files?**
@@ -164,36 +164,9 @@ What will **not be lost** are all files under `bucket_name/Jan/33/`, all files u Currently, when Data Transfer Hub perceived that the Access Key of S3 has been rotated, it will fetch the latest key from AWS Secrets Manager automatically. Therefore, the Access Key Rotation will not affect the migrating process of DTH. -## Error messages +**11. Does the Payer Request mode support Public Data Set?**
-After creating the task, you may encounter some error messages. The following list the error messages and provide general steps to troubleshoot them. - -**1. StatusCode: 400, InvalidToken: The provided token is malformed or otherwise invalid** - -If you get this error message, confirm that your secret is configured in the following format. You can copy and paste it directly. - -```json -{ - "access_key_id": "", - "secret_access_key": "" -} -``` - -**2. StatusCode: 403, InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records** - -If you get this error message, check if your bucket name and region name are configured correctly. - -**3. StatusCode: 403, InvalidAccessKeyId: UnknownError** - -If you get this error message, check whether the Credential stored in Secrets Manager has the proper permissions. For more information, refer to [IAM Policy](https://github.com/awslabs/data-transfer-hub/blob/v2.0.0/docs/IAM-Policy.md). - -**4. StatusCode: 400, AccessDeniedException: Access to KMS is not allowed** - -If you get this error message, confirm that your secret is not encrypted by SSE-CMK. Currently, DTH does not support SSE-CMK encrypted secrets. - -**5. dial tcp: lookup xxx.xxxxx.xxxxx.xx (http://xxx.xxxxx.xxxxx.xx/) on xxx.xxx.xxx.xxx:53: no such host** - -If you get this error message, check if your endpoint is configured correctly. +No. Currently, Payer Request data synchronization is only supported through Access Key and Private Key authentication methods. ## Others **1. The cluster node (EC2) is terminated by mistake. How to resolve it?**
@@ -218,7 +191,7 @@ You need to update Secrets in Secrets Manager first, and then go to the EC2 cons When deploying the stack, you will be asked to enter the stack name (`DTHS3Stack` by default), and most resources will be created with the name prefix as the stack name. For example, the format of the queue name is `-S3TransferQueue-`. This plugin will create two main log groups. - - If there is no data transfer, you need to check whether there is a problem in the Finder task log. The following is the log group for scheduling Finder tasks. For more information, refer to the [Error Code List](#error-messages) section. + - If there is no data transfer, you need to check whether there is a problem in the Finder task log. The following is the log group for scheduling Finder tasks. For more information, refer to the [Troubleshooting](../troubleshooting) section. `-EC2FinderLogGroup` diff --git a/docs/en-base/index.md b/docs/en-base/index.md deleted file mode 100644 index ce7a43b..0000000 --- a/docs/en-base/index.md +++ /dev/null @@ -1,21 +0,0 @@ -The Data Transfer Hub solution provides secure, scalable, and trackable data transfer for Amazon Simple Storage Service (Amazon S3) objects and Amazon Elastic Container Registry (Amazon ECR) images. This data transfer helps customers expand their businesses globally in and out of AWS China Regions. - -The solution’s web console provides an interface for managing the following tasks: - -- Transferring Amazon S3 objects between AWS China Regions and AWS Regions -- Transferring data from other cloud providers’ object storage services (including Alibaba Cloud OSS, Tencent COS, and Qiniu Kodo) to Amazon S3 -- Transferring objects from Amazon S3 compatible object storage service to Amazon S3 -- Transferring Amazon ECR images between AWS China Regions and AWS Regions -- Transferring container images from public container registries (for example, Docker Hub, Google gcr.io, Red Hat Quay.io) to Amazon ECR - -!!! note "Note" - - If you need to transfer Amazon S3 objects between AWS Regions, we recommend that you use [Cross-Region Replication][crr]; if you want to transfer Amazon S3 objects within the same AWS Standard Region, we recommend using [Same-Region Replication][srr]. - -This implementation guide describes architectural considerations and configuration steps for deploying Data Transfer Hub in the Amazon Web Services (AWS) Cloud. It includes links to an AWS [CloudFormation][cloudformation] template that launches and configures the AWS services required to deploy this solution using AWS best practices for security and availability. - -The guide is intended for IT architects, developers, DevOps, data analysts, and marketing technology professionals who have practical experience architecting in the AWS Cloud. - -[cloudformation]: https://aws.amazon.com/en/cloudformation/ -[crr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#crr-scenario -[srr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#srr-scenario \ No newline at end of file diff --git a/docs/en-base/notices.md b/docs/en-base/notices.md new file mode 100644 index 0000000..aca5861 --- /dev/null +++ b/docs/en-base/notices.md @@ -0,0 +1,3 @@ +Customers are responsible for making their own independent assessment of the information in this document. 
This document: (a) is for informational purposes only, (b) represents current Amazon Web Services product offerings and practices, which are subject to change without notice, and (c) does not create any commitments or assurances from Amazon Web Services and its affiliates, suppliers or licensors. Amazon Web Services products or services are provided “as is” without warranties, representations, or conditions of any kind, whether express or implied. Amazon Web Services responsibilities and liabilities to its customers are controlled by Amazon Web Services agreements, and this document is not part of, nor does it modify, any agreement between Amazon Web Services and its customers. + +Data Transfer Hub is licensed under the terms of the Apache License Version 2.0, available at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/docs/en-base/cost.md b/docs/en-base/plan-deployment/cost.md similarity index 90% rename from docs/en-base/cost.md rename to docs/en-base/plan-deployment/cost.md index 718354e..f9f85f5 100644 --- a/docs/en-base/cost.md +++ b/docs/en-base/plan-deployment/cost.md @@ -2,7 +2,7 @@ You are responsible for the cost of the AWS services used while running this sol The solution automatically deploys an additional Amazon CloudFront Distribution and an Amazon S3 bucket for storing the static website assets in your account. You are responsible for the incurred variable charges from these services. For full details, refer to the pricing webpage for each AWS service you will be using in this solution. -The following three examples demonstrate how to estimate the cost. Two example estimates are for transferring S3 objects, and one is for transferring ECR images. +The following examples demonstrate how to estimate the cost. Two example estimates are for transferring Amazon S3 objects, and one is for transferring ECR images. ## Cost of an Amazon S3 transfer task @@ -16,7 +16,7 @@ Transfer 1 TB of S3 files from AWS Oregon Region (us-west-2) to AWS Beijing Regi - Average speed per EC2 instance: ~1GB/min - Total EC2 instance hours: ~17 hours -As of March 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| @@ -36,7 +36,7 @@ Transfer 1 TB of S3 files from AWS Oregon region (us-west-2) to China Beijing Re - Average speed per EC2 instance: ~6MB/min (~10 files per sec) - Total EC2 instance hours: ~3000 hours -As of March 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| @@ -56,7 +56,7 @@ For an Amazon ECR transfer task, the cost can vary based on network speed and to Transfer 27 Amazon ECR images (~3 GB in total size) from AWS Ireland Region (eu-west-1) to AWS Beijing Region (cn-north-1). The total runtime is about 6 minutes. 
-As of March 2023, the cost of using the solution to complete the transfer task is shown in the following table: +As of April 2023, the cost of using the solution to complete the transfer task is shown in the following table: | AWS service | Dimensions | Total Cost | |----------|--------|--------| diff --git a/docs/en-base/plan-deployment/quotas.md b/docs/en-base/plan-deployment/quotas.md new file mode 100644 index 0000000..cac60b1 --- /dev/null +++ b/docs/en-base/plan-deployment/quotas.md @@ -0,0 +1,9 @@ +### Quotas for AWS services in this solution + +Make sure you have sufficient quota for each of the services [implemented in this solution](../../architecture-overview/architecture-details/#aws-services-in-this-solution). For more information, see [AWS service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). + +Choose one of the following links to go to the page for that service. To view the service quotas for all AWS services in the documentation without switching pages, view the information in the [Service endpoints and quotas](https://docs.aws.amazon.com/general/latest/gr/aws-general.pdf#aws-service-information) page in the PDF instead. + +### AWS CloudFormation quotas + +Your AWS account has AWS CloudFormation quotas that you should be aware of when launching the stack in this solution. By understanding these quotas, you can avoid limitation errors that would prevent you from deploying this solution successfully. For more information, refer to [AWS CloudFormation quotas](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html) in the *AWS CloudFormation User Guide*. \ No newline at end of file diff --git a/docs/en-base/plan-deployment/regions.md b/docs/en-base/plan-deployment/regions.md new file mode 100644 index 0000000..ce21e11 --- /dev/null +++ b/docs/en-base/plan-deployment/regions.md @@ -0,0 +1,28 @@ +This solution uses services which may not be currently available in all AWS Regions. Launch this solution in an AWS Region where required services are available. For the most current availability by Region, refer to the [AWS Regional Services List](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/). + +## Supported regions for deployment in AWS Regions + +| Region Name | Region ID | +|----------|--------| +| US East (N. Virginia) | us-east-1| +| US East (Ohio) | us-east-2| +| US West (N. California) | us-west-1| +| US West (Oregon) | us-west-2| +| Asia Pacific (Mumbai) | ap-south-1| +| Asia Pacific (Tokyo) | ap-northeast-1| +| Asia Pacific (Seoul) | ap-northeast-2| +| Asia Pacific (Singapore) | ap-southeast-1| +| Asia Pacific (Sydney) | ap-southeast-2| +| Canada (Central) | ca-central-1| +| Europe (Ireland) | eu-west-1| +| Europe (London) | eu-west-2| +| Europe (Stockholm) | eu-north-1| +| Europe (Frankfurt) | eu-central-1| +| South America (São Paulo) | sa-east-1| + +## Supported regions for deployment in AWS China Regions + +| Region Name | Region ID | +|----------|--------| +| China (Beijing) Region Operated by Sinnet | cn-north-1 +| China (Ningxia) Region Operated by NWCD | cn-northwest-1 diff --git a/docs/en-base/plan-deployment/security.md b/docs/en-base/plan-deployment/security.md new file mode 100644 index 0000000..98f81aa --- /dev/null +++ b/docs/en-base/plan-deployment/security.md @@ -0,0 +1,9 @@ +When you build systems on AWS infrastructure, security responsibilities are shared between you and AWS. 
This [shared model](https://aws.amazon.com/compliance/shared-responsibility-model/) reduces your operational burden because AWS operates, manages, and controls the components including the host operating system, the virtualization layer, and the physical security of the facilities in which the services operate. For more information about AWS security, visit [AWS Cloud Security](http://aws.amazon.com/security/). + +### IAM roles + +AWS Identity and Access Management (IAM) roles allow customers to assign granular access policies and permissions to services and users on the AWS Cloud. This solution creates IAM roles that grant the solution’s AWS Lambda functions, Amazon API Gateway, and Amazon Cognito access to create regional resources. + +### Amazon CloudFront + +This solution deploys a web console hosted in an Amazon S3 bucket. To help reduce latency and improve security, this solution includes an Amazon CloudFront distribution with an origin access identity, which is a CloudFront user that provides public access to the solution’s website bucket contents. For more information, refer to [Restricting Access to Amazon S3 Content by Using an Origin Access Identity](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) in the *Amazon CloudFront Developer Guide*. \ No newline at end of file diff --git a/docs/en-base/regions.md b/docs/en-base/regions.md deleted file mode 100644 index 649ba46..0000000 --- a/docs/en-base/regions.md +++ /dev/null @@ -1,28 +0,0 @@ -This solution uses services which may not be currently available in all AWS Regions. Launch this solution in an AWS Region where required services are available. For the most current availability by Region, refer to the AWS Regional Services List. - -## Supported regions for deployment in AWS Regions - -| Region Name | Region ID | -|----------|--------| -| US East (N. Virginia) | us-east-1 -| US East (Ohio) | us-east-2 -| US West (N. California) | us-west-1 -| US West (Oregon) | us-west-2 -| Asia Pacific (Mumbai) | ap-south-1 -| Asia Pacific (Tokyo) | ap-northeast-1 -| Asia Pacific (Seoul) | ap-northeast-2 -| Asia Pacific (Singapore) | ap-southeast-1 -| Asia Pacific (Sydney) | ap-southeast-2 -| Canada (Central) | ca-central-1 -| Europe (Ireland) | eu-west-1 -| Europe (London) | eu-west-2 -| Europe (Stockholm) | eu-north-1 -| Europe (Frankfurt) | eu-central-1 -| South America (São Paulo) | sa-east-1 - -## Supported regions for deployment in AWS China Regions - -| Region Name | Region ID | -|----------|--------| -| China (Beijing) Region Operated by Sinnet | cn-north-1 -| China (Ningxia) Region Operated by NWCD | cn-northwest-1 diff --git a/docs/en-base/revisions.md b/docs/en-base/revisions.md index b50a1dd..9a9be3b 100644 --- a/docs/en-base/revisions.md +++ b/docs/en-base/revisions.md @@ -4,4 +4,5 @@ | July 2021 | Released version 2.0
1. Support general OIDC providers, including Authing, Auth0, okta, etc.
2. Support transferring objects from more Amazon S3 compatible storage services, such as Huawei Cloud OBS.
3. Support setting the access control list (ACL) of the target bucket object
4. Support deployment in account A, and copying data from account B to account C
5. Change to use Graviton 2 instance, and turn on BBR to transfer S3 objects to improve performance and save costs
6. Change to use Secrets Manager to maintain credential information | | December 2021 | Released version 2.1
1. Support custom prefix list to filter transfer tasks
2. Support configuration of single-run file transfer tasks
3. Support configuration of tasks through custom CRON Expression timetable
4. Support manual enabling or disabling of data comparison function | | July 2022 | Released version 2.2
1. Support transfer data through Direct Connect| -| March 2023 | Released version 2.3
1. Support embeded dashboard and logs
2. Support S3 Access Key Rotation
3. Enhance One Time Transfer Task monitoring| \ No newline at end of file +| March 2023 | Released version 2.3
1. Support embedded dashboard and logs
2. Support S3 Access Key Rotation
3. Enhance One Time Transfer Task monitoring| +| April 2023 | Released version 2.4
1. Support payer request S3 object transfer| \ No newline at end of file diff --git a/docs/en-base/security.md b/docs/en-base/security.md deleted file mode 100644 index b82b463..0000000 --- a/docs/en-base/security.md +++ /dev/null @@ -1,9 +0,0 @@ -When you build systems on AWS infrastructure, security responsibilities are shared between you and AWS. This [shared model](https://aws.amazon.com/compliance/shared-responsibility-model/) reduces your operational burden because AWS operates, manages, and controls the components including the host operating system, the virtualization layer, and the physical security of the facilities in which the services operate. For more information about AWS security, see [AWS Cloud Security](http://aws.amazon.com/security/). - -## IAM roles - -AWS Identity and Access Management (IAM) roles allow customers to assign granular access policies and permissions to services and users on the AWS Cloud. This solution creates IAM roles that grant the solution’s AWS Lambda functions, Amazon API Gateway and Amazon Cognito access to create regional resources. - -## Amazon CloudFront - -This solution deploys a web console hosted in an Amazon Simple Storage Service (Amazon S3) bucket. To help reduce latency and improve security, this solution includes an Amazon CloudFront distribution with an origin access identity, which is a CloudFront user that provides public access to the solution’s website bucket contents. For more information, refer to [Restricting Access to Amazon S3 Content by Using an Origin Access Identity](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) in the Amazon CloudFront Developer Guide. \ No newline at end of file diff --git a/docs/en-base/solution-components.md b/docs/en-base/solution-components.md deleted file mode 100644 index 8738555..0000000 --- a/docs/en-base/solution-components.md +++ /dev/null @@ -1,31 +0,0 @@ -This solution has three components: - -- Web console -- Amazon S3 Transfer Engine -- Amazon ECR Transfer Engine - -## Web console - -This solution provides a simple web console which allows you to create and manage transfer tasks for Amazon S3 and Amazon ECR. - -## Amazon S3 Transfer Engine -Amazon S3 transfer engine runs the Amazon S3 plugin and is used for transferring objects from their sources into S3 buckets. The S3 plugin supports the following features: - -- Transfer Amazon S3 objects between AWS China Regions and AWS Regions -- Transfer objects from Alibaba Cloud OSS / Tencent COS / Qiniu Kodo to Amazon S3 -- Transfer objects from S3 Compatible Storage service to Amazon S3 -- Support near real time transfer via S3 Event -- Support Transfer with object metadata -- Support incremental data transfer -- Auto retry and error handling - - -## Amazon ECR Transfer Engine -Amazon ECR engine runs the Amazon ECR plugin and is used for transferring container images from other container registries. The ECR plugin supports the following features: - -- Transfer Amazon ECR images between AWS China Regions and AWS Regions -- Transfer from public container registry (such as Docker Hub, GCR.io, Quay.io) to Amazon ECR -- Transfer selected images to Amazon ECR -- Transfer all images and tags from Amazon ECR - -The ECR plugin leverages [skopeo](https://github.com/containers/skopeo) for the underlying engine. The AWS Lambda function lists images in their sources and uses Fargate to run the transfer jobs. 
\ No newline at end of file diff --git a/docs/en-base/solution-overview/features-and-benefits.md b/docs/en-base/solution-overview/features-and-benefits.md new file mode 100644 index 0000000..003f715 --- /dev/null +++ b/docs/en-base/solution-overview/features-and-benefits.md @@ -0,0 +1,14 @@ +The solution’s web console provides an interface for managing the following tasks: + +- Transferring Amazon S3 objects between AWS China Regions and AWS Regions +- Transferring data from other cloud providers’ object storage services (including Alibaba Cloud OSS, Tencent COS, and Qiniu Kodo) to Amazon S3 +- Transferring objects from Amazon S3 compatible object storage service to Amazon S3 +- Transferring Amazon ECR images between AWS China Regions and AWS Regions +- Transferring container images from public container registries (for example, Docker Hub, Google gcr.io, Red Hat Quay.io) to Amazon ECR + +!!! note "Note" + + If you need to transfer Amazon S3 objects between AWS Regions, we recommend that you use [Cross-Region Replication][crr]. If you want to transfer Amazon S3 objects within the same AWS Region, we recommend using [Same-Region Replication][srr]. + +[crr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#crr-scenario +[srr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#srr-scenario \ No newline at end of file diff --git a/docs/en-base/solution-overview/index.md b/docs/en-base/solution-overview/index.md new file mode 100644 index 0000000..dd8caf1 --- /dev/null +++ b/docs/en-base/solution-overview/index.md @@ -0,0 +1,17 @@ +The Data Transfer Hub solution provides secure, scalable, and trackable data transfer for Amazon Simple Storage Service (Amazon S3) objects and Amazon Elastic Container Registry (Amazon ECR) images. This data transfer helps customers expand their businesses globally by easily moving data in and out of Amazon Web Services (AWS) China Regions. + +This implementation guide provides an overview of the Data Transfer Hub solution, its reference architecture and components, considerations for planning the deployment, configuration steps for deploying the Data Transfer Hub solution to the AWS Cloud. + +Use this navigation table to quickly find answers to these questions: + +| If you want to … | Read… | +|----------|--------| +| Know the cost for running this solution | [Cost](../plan-deployment/cost) | +| Understand the security considerations for this solution | [Security](../plan-deployment/security) | +| Know how to plan for quotas for this solution | [Quotas](../plan-deployment/quotas) | +| Know which AWS Regions are supported for this solution | [Supported AWS Regions](../plan-deployment/regions) | +| View or download the AWS CloudFormation template included in this solution to automatically deploy the infrastructure resources (the “stack”) for this solution | [AWS CloudFormation templates](../deployment/template) | + +This guide is intended for IT architects, developers, DevOps, data analysts, and marketing technology professionals who have practical experience architecting in the AWS Cloud. + +You will be responsible for your compliance with all applicable laws in respect of your data transfer tasks. \ No newline at end of file diff --git a/docs/en-base/solution-overview/use-cases.md b/docs/en-base/solution-overview/use-cases.md new file mode 100644 index 0000000..1e99991 --- /dev/null +++ b/docs/en-base/solution-overview/use-cases.md @@ -0,0 +1,10 @@ +Today, the China market is one of biggest markets in the world. 
Many international companies are seeking success in China, and many Chinese companies are expanding their businesses globally. One of the most important steps in this expansion is moving data. + +S3 Cross-Region Replication and ECR Cross-Region Replication are popular, but customers cannot use them to replicate data into China Regions. With the launch of the Data Transfer Hub solution, customers can now create S3 and ECR data transfer tasks between AWS Regions and AWS China Regions in a web portal. Moreover, it supports replicating data from other cloud providers to AWS. + +Data Transfer Hub supports the following use cases: + +* Copy Amazon S3 objects between AWS Regions and AWS China Regions. +* Copy data from other cloud providers’ object storage services (including Alibaba Cloud OSS, Tencent COS, Qiniu Kodo) to Amazon S3. +* Transfer Amazon ECR images between AWS Regions and AWS China Regions. +* Transfer Docker images from public container registries (for example, Docker Hub, Google gcr.io, Red Hat Quay.io) to Amazon ECR. diff --git a/docs/en-base/troubleshooting.md b/docs/en-base/troubleshooting.md new file mode 100644 index 0000000..0b10636 --- /dev/null +++ b/docs/en-base/troubleshooting.md @@ -0,0 +1,28 @@ +After creating the task, you may encounter some error messages. The following lists the error messages and provides general steps to troubleshoot them. + +**1. StatusCode: 400, InvalidToken: The provided token is malformed or otherwise invalid** + +If you get this error message, confirm that your secret is configured in the following format. You can copy and paste it directly. + +```json +{ + "access_key_id": "", + "secret_access_key": "" +} +``` + +**2. StatusCode: 403, InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records** + +If you get this error message, check if your bucket name and region name are configured correctly. + +**3. StatusCode: 403, InvalidAccessKeyId: UnknownError** + +If you get this error message, check whether the credential stored in Secrets Manager has the proper permissions. For more information, refer to [IAM Policy](https://github.com/awslabs/data-transfer-hub/blob/v2.0.0/docs/IAM-Policy.md). + +**4. StatusCode: 400, AccessDeniedException: Access to KMS is not allowed** + +If you get this error message, confirm that your secret is not encrypted by SSE-CMK. Currently, DTH does not support SSE-CMK encrypted secrets. + +**5. dial tcp: lookup xxx.xxxxx.xxxxx.xx (http://xxx.xxxxx.xxxxx.xx/) on xxx.xxx.xxx.xxx:53: no such host** + +If you get this error message, check if your endpoint is configured correctly. diff --git a/docs/en-base/tutorial-directconnect.md b/docs/en-base/tutorial-directconnect.md deleted file mode 100644 index 156cbdd..0000000 --- a/docs/en-base/tutorial-directconnect.md +++ /dev/null @@ -1,257 +0,0 @@ -This tutorial describes how to use Data Transfer Hub (DTH) via Direct Connect (DX). - -When DTH worker node and finder node start to work, they need to download related assets (such as CloudWatch agent, DTH CLI) from internet by default. In an isolated network, you need to manually download and upload these files to an S3 bucket in the region where DTH is deployed. 
- -You have two options to use DTH to transfer data via DX: - -- [Use DTH to transfer data via DX in a non-isolated network](#non-isolated-network) -- [Use DTH to transfer data via DX in an isolated network](#isolated-network) - -## Use DTH to transfer data via DX in a non-isolated network -In this scenario, DTH is deployed in the **destination side** and within a VPC with **public access** (has Internet Gateway or NAT), and the source bucket is in the isolated network. - -!!! note "Note" - - As DTH deployment VPC has public internet access (IGW or NAT), EC2 worker/finder nodes can access other AWS services used by DTH such as secret managers and download related assets (such as CloudWatch agent, DTH CLI) from internet without any changes. - -1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. - -2. From the **Engine options** page, under engine, select **Amazon S3**, and then choose **Next Step**. - -3. Specify the transfer task details. - - Under **Source Type**, select the data source **Amazon S3 Compatible Storage**. - -4. Enter **endpoint url**, which must be the **interface endpoint** url, such as `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`. You can find the specific url in [VPC Endpoint Console](https://us-east-1.console.aws.amazon.com/vpc/home?region=us-west-2#Endpoints:vpcEndpointType=Interface) DNS names part. - -5. Enter **bucket name** and choose to sync **Full Bucket** or **Objects with a specific prefix** or **Objects with different prefixes**. - -6. Provide destination settings for the S3 buckets. - -7. From **Engine settings**, verify the values and modify them if necessary. For incremental data transfer, we recommend to set the **minimum capacity** to at least 1. - -8. At **Task Scheduling Settings**, select your task scheduling configuration. - - If you want to configure the timed task at a fixed frequency to compare the data difference on both sides of the time, select **Fixed Rate**. - - If you want to configure a scheduled task through [Cron Expression](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) to achieve a scheduled comparison of data differences on both sides, select **Cron Expression**. - - If you only want to perform the data synchronization task once, select **One Time Transfer**. - -9. For **Advanced Options**, keep the default values. - -10. At **Need Data Comparison before Transfer**, select your task configuration. - - - If you want to skip the data comparison process and transfer all files, select **No**. - - If you only want to synchronize files with differences, select **Yes**. - -11. In **Alarm Email**, provide an email address. - -12. Choose **Next** and review your task parameter details. - -13. Choose **Create Task**. - -## Use DTH to transfer data via DX in an isolated network -In this scenario, DTH is deployed in the **destination side** and within a VPC **without public access** (isolated VPC), and the source bucket is also in an isolated network. - -### Prerequisites -**Configure the service endpoints for VPC** - -DTH worker/finder nodes need to access other AWS services. To do so, you need to create **Gateway Endpoint** for **DynamoDB** and **S3**, create **Interface Endpoint** for **logs**, **SQS** and **Secret Managers**. 
- -![endpoints](./images/dx-vpc-endpoints.png) - -**Upload the artifacts to an S3 bucket** - -In an isolated network, do the following to manually download and upload files to an S3 bucket in the region where DTH is deployed. - -1. Download [Amazon CloudWatch Agent](https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/arm64/latest/amazon-cloudwatch-agent.rpm) and [DTH CLI](https://aws-gcr-solutions-assets.s3.amazonaws.com/data-transfer-hub-cli/v1.2.1/dthcli_1.2.1_linux_arm64.tar.gz). - -2. Create the worker's CloudWatch Agent Config file. You can create a file named `cw_agent_config.json`. -```json -{ - "agent": { - "metrics_collection_interval": 60, - "run_as_user": "root" - }, - "logs": { - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/home/ec2-user/worker.log", - "log_group_name": "##log group##", - "log_stream_name": "Instance-{instance_id}" - } - ] - } - } - }, - "metrics": { - "append_dimensions": { - "AutoScalingGroupName": "${aws:AutoScalingGroupName}", - "InstanceId": "${aws:InstanceId}" - }, - "aggregation_dimensions": [ - [ - "AutoScalingGroupName" - ] - ], - "metrics_collected": { - "disk": { - "measurement": [ - "used_percent" - ], - "metrics_collection_interval": 60, - "resources": [ - "*" - ] - }, - "mem": { - "measurement": [ - "mem_used_percent" - ], - "metrics_collection_interval": 60 - } - } - } -} -``` -3. Upload these three files to an S3 bucket in the region where DTH is deployed. - -![assets](./images/dx-s3-assets.png) - -### Deploy the DTH S3-Plugin - -We recommend using the **DTH S3-plugin** to create the transfer task, instead of using the DTH console. - -**For AWS China Regions** - -[![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) - -**For AWS Global Regions** - -[![Launch Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) - -1. For **Source Type**, choose **Amazon_S3**. - -2. Enter the **Source Bucket** name. - -3. Enter the **Source Prefix** if needed. - -4. Enter the **Source Endpoint URL**. For example, `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`. - -5. For **Source In Current Account**, choose **false**. - -6. For **Source Credentials**, enter the secret's name stored in the [Secrets Manager](https://console.aws.amazon.com/secretsmanager/home). - -7. For **Enable S3 Event**, choose **No**. - -8. Configure the **Destination Bucket**, **Destination Prefix**, **Destination Region** and **Destination in Current Account**. Leave the Destination Credentials blank if the destination bucket is in current account. - -9. Configure the **Alarm Email**. - -10. Configure the **VPC ID** and **Subnet IDs**. - -11. For other parameters, keep the default values and choose **Next**. - -12. Choose **Next**. Configure additional stack options such as tags (Optional). - -13. Choose **Next**. Review and confirm acknowledgement, then choose **Create Stack** to start the deployment. - -The deployment will take approximately 3 to 5 minutes. - -### Update the EC2 Userdata for worker nodes and finder node - -**Update worker nodes' Userdata** - -1. 
Go to the Auto Scaling Group's [Launch configurations](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchConfigurations:launchConfigurationName=). - -2. Select the configuration and choose **Copy Launch Configuration**. - -3. Edit the **User data** under the **Advanced details** section. - - Replace the code above `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh` with the following shell script. - - - ```shell - #!/bin/bash - - yum update -y - cd /home/ec2-user/ - asset_bucket= - aws s3 cp "s3://$asset_bucket/cw_agent_config.json" . --region - aws s3 cp "s3://$asset_bucket/amazon-cloudwatch-agent.rpm" . --region - aws s3 cp "s3://$asset_bucket/dthcli_1.2.1_linux_arm64.tar.gz" . --region - - sudo yum install -y amazon-cloudwatch-agent.rpm - sed -i -e "s/##log group##//g" cw_agent_config.json - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s - tar zxvf dthcli_1.2.1_linux_arm64.tar.gz - - ``` - - - Replace the `` with your specific bucket name where the assets are stored. - - - Replace the `` with the region where you deploy the DTH S3-Plugin solution. - - - Replace the `` with the DTH Worker's log group name. - - - Do not edit the code behind `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh`. - -4. Choose **Create Launch Configuration**. - -5. Go to [Auto Scaling Group](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#AutoScalingGroups:). - -6. Choose the specific scaling group and click **Edit**. - -7. In the **Launch configuration** section, choose the new launch configuration created in the previous step. Click **Update**. - -8. Terminate all the running DTH worker node, and the Auto Scaling Group will launch the new worker node with the new Userdata. - -**Update finder nodes' Userdata** - -1. Go to the EC2 [Launch Templates](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchTemplates:). - -2. Click **Modify template**. - -3. Edit the **User data** under the **Advanced details** section. - - Replace the code above `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh` using the shell script bellow. - - ```shell - #!/bin/bash - - yum update -y - cd /home/ec2-user/ - asset_bucket= - aws s3 cp "s3://$asset_bucket/amazon-cloudwatch-agent.rpm" . --region - aws s3 cp "s3://$asset_bucket/dthcli_1.2.1_linux_arm64.tar.gz" . --region - - echo "{\"agent\": {\"metrics_collection_interval\": 60,\"run_as_user\": \"root\"},\"logs\": {\"logs_collected\": {\"files\": {\"collect_list\": [{\"file_path\": \"/home/ec2-user/finder.log\",\"log_group_name\": \"##log group##\"}]}}}}" >> /home/ec2-user/cw_agent_config.json, - - sudo yum install -y amazon-cloudwatch-agent.rpm - - sed -i -e "s/##log group##/`/g" cw_agent_config.json - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s - tar zxvf dthcli_1.2.1_linux_arm64.tar.gz - - ``` - - - Replace the `` with your specific bucket name where the assets are stored. - - - Replace the `` with the region where you deploy the DTH S3-Plugin solution. - - - Replace the `` with the DTH Finder's log group name. - - - Do not edit the code behind `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh`. - -4. Choose **Create template version**. - -5. Use this new version template to launch a new Finder Node, and manually terminate the old one. 
- - -## Architecture - -[![architecture]][architecture] - -[architecture]: ./images/dx-arch-global.png - -DTH worker nodes running on EC2 transfer data from bucket in one AWS account to bucket in another AWS account. - -* To access bucket in the account where DTH is deployed, DTH worker nodes use **S3 Gateway Endpoint** -* To access bucket in another account, DTH worker nodes use **S3 Private Link** by **S3 Interface Endpoint** \ No newline at end of file diff --git a/docs/en-base/uninstall.md b/docs/en-base/uninstall.md index 2a2cbf7..023a360 100644 --- a/docs/en-base/uninstall.md +++ b/docs/en-base/uninstall.md @@ -1,8 +1,4 @@ -You can uninstall the Data Transfer Hub solution from the AWS Management Console or by using the AWS Command Line Interface. - -!!! note "Note" - - Before uninstalling the solution, you must manually stop any active data transfer tasks. +You can uninstall the Data Transfer Hub solution from the AWS Management Console or by using the AWS Command Line Interface. You must manually stop any active transfer tasks before uninstalling. ## Using the AWS Management Console @@ -22,9 +18,9 @@ $ aws cloudformation delete-stack --stack-name ## Deleting the Amazon S3 buckets This solution is configured to retain the solution-created Amazon S3 bucket (for deploying in an opt-in Region) if you decide to delete the AWS CloudFormation stack to prevent accidental data loss. After uninstalling the solution, you can manually delete this S3 bucket if you do not need to retain the data. Follow these steps to delete the Amazon S3 bucket. -1. Sign in to the [Amazon S3](https://console.aws.amazon.com/s3/home) console +1. Sign in to the [Amazon S3](https://console.aws.amazon.com/s3/home) console. 2. Choose Buckets from the left navigation pane. -3. Locate the S3 buckets. +3. Locate the `` S3 buckets. 4. Select the S3 bucket and choose **Delete**. To delete the S3 bucket using AWS CLI, run the following command: diff --git a/docs/en-base/upgrade.md b/docs/en-base/update.md similarity index 96% rename from docs/en-base/upgrade.md rename to docs/en-base/update.md index 0190368..22c7049 100644 --- a/docs/en-base/upgrade.md +++ b/docs/en-base/update.md @@ -1,5 +1,3 @@ -# Upgrade Data Transfer Hub - **Time to upgrade**: Approximately 20 minutes ## Upgrade Overview @@ -17,7 +15,7 @@ Use the following steps to upgrade the solution on AWS console. 2. Select the Data Transfer Hub main stack, and click the **Update** button. -3. Choose **Replace current template**, and enter the specific **Amazon S3 URL** according to your initial deployment type. Refer to [Deployment Overview](./deployment.md) for more details. +3. Choose **Replace current template**, and enter the specific **Amazon S3 URL** according to your initial deployment type. Refer to [Deployment Overview](../../deployment/deployment-overview) for more details. | Type | Link | | -------------------------------------------- | ------------------------------------------------------------ | diff --git a/docs/en-base/tutorial-cli-launch.md b/docs/en-base/user-guide/tutorial-cli-launch.md similarity index 95% rename from docs/en-base/tutorial-cli-launch.md rename to docs/en-base/user-guide/tutorial-cli-launch.md index 2e4b1ef..672c497 100644 --- a/docs/en-base/tutorial-cli-launch.md +++ b/docs/en-base/user-guide/tutorial-cli-launch.md @@ -13,13 +13,13 @@ You can use the [AWS CLI][aws-cli] to create an Amazon S3 transfer task. 
Note th https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template ``` -3. Go to your terminal and enter the following command. For the parameter details, refer to the table below. +3. Go to your terminal and enter the following command. For the parameter details, refer to the Parameters table. ```shell aws cloudformation create-stack --stack-name dth-s3-task --template-url CLOUDFORMATION_URL \ --capabilities CAPABILITY_NAMED_IAM \ --parameters \ - ParameterKey=alarmEmail,ParameterValue=your_email@abc.com \ + ParameterKey=alarmEmail,ParameterValue=your_email@example.com \ ParameterKey=destBucket,ParameterValue=dth-receive-cn-north-1 \ ParameterKey=destPrefix,ParameterValue=test-prefix \ ParameterKey=destCredentials,ParameterValue=drh-cn-secret-key \ @@ -55,6 +55,7 @@ You can use the [AWS CLI][aws-cli] to create an Amazon S3 transfer task. Note th | destPrefix | | | Destination prefix (Optional) | destRegion | | | Destination region name | destStorageClass | STANDARD
STANDARD_IA
ONEZONE_IA
INTELLIGENT_TIERING | INTELLIGENT_TIERING | Destination storage class, which defaults to INTELLIGENT_TIERING +| isPayerRequest | true
false | false | Indicates whether to enable requester pays mode. If true, objects are retrieved from the source bucket in requester pays mode. | | ec2CronExpression | | 0/60 * * * ? * | Cron expression for EC2 Finder task <br>
"" for one time transfer. | | finderEc2Memory | 8
16
32
64
128
256 | 8 GB| The amount of memory (in GB) used by the Finder task. | ec2Subnets | | | Two public subnets or two private subnets with [NAT gateway][nat] | diff --git a/docs/en-base/user-guide/tutorial-directconnect.md b/docs/en-base/user-guide/tutorial-directconnect.md new file mode 100644 index 0000000..a7dc149 --- /dev/null +++ b/docs/en-base/user-guide/tutorial-directconnect.md @@ -0,0 +1,60 @@ +This tutorial describes how to use Data Transfer Hub (DTH) via Direct Connect (DX). + +When the DTH worker node and finder node start to work, they need to download related assets (such as CloudWatch agent, DTH CLI) from the internet by default. In an isolated network, you need to manually download and upload these files to an S3 bucket in the region where DTH is deployed. + +You have two options to use DTH to transfer data via DX: + +- [Use DTH to transfer data via DX in a non-isolated network](#non-isolated-network) +- [Use DTH to transfer data via DX in an isolated network](#isolated-network) + +## Use DTH to transfer data via DX in a non-isolated network +In this scenario, DTH is deployed in the **destination side** and within a VPC with **public access** (has Internet Gateway or NAT), and the source bucket is in the isolated network. + +!!! note "Note" + + As DTH deployment VPC has public internet access (IGW or NAT), EC2 worker/finder nodes can access other AWS services used by DTH such as secret managers and download related assets (such as CloudWatch agent, DTH CLI) from internet without any changes. + +1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. + +2. From the **Engine options** page, under engine, select **Amazon S3**, and then choose **Next Step**. + +3. Specify the transfer task details. + - Under **Source Type**, select the data source **Amazon S3 Compatible Storage**. + +4. Enter **endpoint url**, which must be the **interface endpoint** url, such as `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`. You can find the specific url in [VPC Endpoint Console](https://us-east-1.console.aws.amazon.com/vpc/home?region=us-west-2#Endpoints:vpcEndpointType=Interface) DNS names part. + +5. Enter **bucket name** and choose to sync **Full Bucket** or **Objects with a specific prefix** or **Objects with different prefixes**. + +6. Provide destination settings for the S3 buckets. + +7. From **Engine settings**, verify the values and modify them if necessary. For incremental data transfer, we recommend to set the **minimum capacity** to at least 1. + +8. At **Task Scheduling Settings**, select your task scheduling configuration. + - If you want to configure the timed task at a fixed frequency to compare the data difference on both sides of the time, select **Fixed Rate**. + - If you want to configure a scheduled task through [Cron Expression](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) to achieve a scheduled comparison of data differences on both sides, select **Cron Expression**. + - If you only want to perform the data synchronization task once, select **One Time Transfer**. + +9. For **Advanced Options**, keep the default values. + +10. At **Need Data Comparison before Transfer**, select your task configuration. + + - If you want to skip the data comparison process and transfer all files, select **No**. + - If you only want to synchronize files with differences, select **Yes**. + +11. In **Alarm Email**, provide an email address. + +12. 
Choose **Next** and review your task parameter details. + +13. Choose **Create Task**. + +## Use DTH to transfer data via DX in an isolated network +In this scenario, DTH is deployed in the **destination side** and within a VPC **without public access** (isolated VPC), and the source bucket is also in an isolated network. For details, refer to the [tutorial][https://github.com/awslabs/data-transfer-hub/blob/main/docs/tutorial-directconnect-isolated.md]. + +[![architecture]][architecture] + +[architecture]: ../images/dx-arch-global.png + +DTH worker nodes running on EC2 transfer data from bucket in one AWS account to bucket in another AWS account. + +* To access bucket in the account where DTH is deployed, DTH worker nodes use **S3 Gateway Endpoint** +* To access bucket in another account, DTH worker nodes use **S3 Private Link** by **S3 Interface Endpoint** \ No newline at end of file diff --git a/docs/en-base/tutorial-ecr.md b/docs/en-base/user-guide/tutorial-ecr.md similarity index 97% rename from docs/en-base/tutorial-ecr.md rename to docs/en-base/user-guide/tutorial-ecr.md index a850612..ddaa98b 100644 --- a/docs/en-base/tutorial-ecr.md +++ b/docs/en-base/user-guide/tutorial-ecr.md @@ -1,4 +1,4 @@ -You can use the web console to create an Amazon ECR transfer task. For more information about how to launch the web console, see [deployment](./deployment.md). +You can use the web console to create an Amazon ECR transfer task. For more information about how to launch the web console, see [deployment](../../deployment/deployment-overview). 1. From the **Create Transfer Task** page, select **Create New Task**, and then select **Next**. diff --git a/docs/en-base/tutorial-oss.md b/docs/en-base/user-guide/tutorial-oss.md similarity index 83% rename from docs/en-base/tutorial-oss.md rename to docs/en-base/user-guide/tutorial-oss.md index 6cef7d3..93b5d4a 100644 --- a/docs/en-base/tutorial-oss.md +++ b/docs/en-base/user-guide/tutorial-oss.md @@ -1,7 +1,7 @@ This tutorial describes how to transfer Objects from **Alibaba Cloud OSS** to **Amazon S3**. ## Prerequisite -You have already deployed the Data Transfer Hub in **Oregon (us-west-2)** region. For more information, see [deployment](./deployment.md). +You have already deployed the Data Transfer Hub in **Oregon (us-west-2)** region. For more information, see [deployment](../../deployment/deployment-overview). ## Step 1: Configure credentials for OSS 1. Open the **[Secrets Manager](https://console.aws.amazon.com/secretsmanager/home#)** console. @@ -35,7 +35,7 @@ You have already deployed the Data Transfer Hub in **Oregon (us-west-2)** region 5. Provide destination settings for the S3 buckets. -6. From **Engine settings**, verify the values and modify them if necessary. For incremental data transfer, we recommend to set the **minimum capacity** to at least 1. +6. From **Engine settings**, verify the values and modify them if necessary. For incremental data transfer, set the **minimum capacity** to at least 1. 7. At **Task Scheduling Settings**, select your task scheduling configuration. - If you want to configure the timed task at a fixed frequency to compare the data difference on both sides of the time, select **Fixed Rate**. @@ -57,9 +57,9 @@ You have already deployed the Data Transfer Hub in **Oregon (us-west-2)** region After the task is created successfully, it will appear on the **Tasks** page. 
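+For reference, the OSS credential secret created in Step 1 can also be created from the command line instead of the console. The following is a minimal sketch using the AWS CLI; the secret name `dth-oss-credentials` is an illustrative placeholder rather than a name required by the solution, and the key values must be replaced with your own OSS AccessKey pair:
+
+```shell
+# Hypothetical example: store the OSS AccessKey pair in the JSON shape that
+# Data Transfer Hub expects ("access_key_id" / "secret_access_key").
+aws secretsmanager create-secret \
+    --name dth-oss-credentials \
+    --region us-west-2 \
+    --secret-string '{"access_key_id": "<YOUR_OSS_ACCESS_KEY_ID>", "secret_access_key": "<YOUR_OSS_ACCESS_KEY_SECRET>"}'
+```
+
+A malformed secret typically surfaces later as the `InvalidToken` error described in the Troubleshooting section.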
-![s3-task-list](./images/s3-task-list-oss.png) +![s3-task-list](../images/s3-task-list-oss.png) -Figure 2: Transfer task details and status +*Transfer task details and status* Select the **Task ID** to go to the task Details page, and then choose **CloudWatch Dashboard** to monitor the task status. @@ -71,9 +71,11 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am **Prepare your AWS account's Access Key/Secret Key** -1. Go to [IAM console](https://us-east-1.console.aws.amazon.com/iam/home#/policies$new?step=edit), and click **Create Policy**. +1. Sign in to the [IAM console](https://console.aws.amazon.com/iam/home). -2. Choose the **JSON** tab, and enter the following information. +2. In the navigation pane, choose **Policies**, then choose **Create poliy**. + +3. Select the **JSON** tab, and enter the following information. ```json { @@ -82,7 +84,6 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am { "Effect": "Allow", "Action": [ - "sqs:SendMessageBatch", "sqs:SendMessage" ], "Resource": "arn:aws:sqs:us-west-2:xxxxxxxxxxx:DTHS3Stack-S3TransferQueue-1TSF4ESFQEFKJ" @@ -91,13 +92,15 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am } ``` !!! Note "Note" - Replace your Queue arn in the JSON. + Replace your queue ARN in the JSON. + +4. Complete the workflow to create the policy. -3. Create the user. Go to the [User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users) console and click **Add User**. +5. In the navigation pane, choose **Users**, then choose **Add users**. -4. Attach the policy you created previously to the user. +6. Attach the policy you created previously to the user. -5. Save the **ACCESS_KEY/SECRET_KEY** which will be used later. +7. Save the **ACCESS_KEY/SECRET_KEY** which will be used later. **Prepare the event-sender function for Alibaba Cloud** @@ -108,7 +111,7 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am pip3 install -t . boto3 ``` -2. Create a `index.py` in the same folder, and enter the code below. +2. Create an `index.py` in the same folder, and enter the code below. ```python import json @@ -160,9 +163,9 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am **Create a function in Alibaba Cloud** -1. Use your Alibaba Cloud account to log in to [Function Compute](https://fc.console.aliyun.com/fc/tasks/), and click **Task**. +1. Use your Alibaba Cloud account to log in to [Function Compute](https://fc.console.aliyun.com/fc/tasks/), and select **Task**. -2. Click **Create Function**. +2. Choose **Create Function**. 3. Choose **Python3.x** as the **Runtime Environments**. @@ -170,15 +173,15 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am 5. Upload the `code.zip` created in the previous step to create the function. -6. Click **Create**. +6. Select **Create**. **Configure the function's environment variables** -1. Click the **Configurations**. +1. Choose the **Configurations**. -2. Click **Modify** in the **Environment Variables**. +2. Select **Modify** in the **Environment Variables**. -3. Enter the config json in the **Environment Variables**. Here you need to use your own `ACCESS_KEY`, `SECRET_KEY` and `QUEUE_URL`. +3. Enter the config JSON in the **Environment Variables**. Here you need to use your own `ACCESS_KEY`, `SECRET_KEY` and `QUEUE_URL`. 
```json { @@ -192,9 +195,9 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am ### Create the trigger -1. Click the **Create Trigger** in **Triggers** tab to create the trigger for the function. +1. Navigate to the **Create Trigger** in **Triggers** tab to create the trigger for the function. - ![portal](images/aliyun_create_trigger.png) + ![portal](../images/aliyun_create_trigger.png) 2. Choose **OSS** as the **Trigger Type**, choose the bucket name. @@ -208,4 +211,4 @@ After you created the task, go to [SQS console](https://us-west-2.console.aws.am oss:ObjectCreated:AppendObject ``` -4. Click **OK**. \ No newline at end of file +4. Select **OK**. \ No newline at end of file diff --git a/docs/en-base/tutorial-s3.md b/docs/en-base/user-guide/tutorial-s3.md similarity index 97% rename from docs/en-base/tutorial-s3.md rename to docs/en-base/user-guide/tutorial-s3.md index dfe0750..2b90200 100644 --- a/docs/en-base/tutorial-s3.md +++ b/docs/en-base/user-guide/tutorial-s3.md @@ -1,4 +1,4 @@ -You can use the web console to create an Amazon S3 transfer task. For more information about how to launch the web console, see [deployment](./deployment.md). +You can use the web console to create an Amazon S3 transfer task. For more information about how to launch the web console, see [deployment](../../deployment/deployment-overview). !!! Note "Note" Data Transfer Hub also supports using AWS CLI to create an Amazon S3 transfer task. For details, refer to this [tutorial](./tutorial-cli-launch.md). @@ -68,7 +68,7 @@ If your source bucket enabled **SSE-CMK**, you need to create an IAM Policy and Pay attention to the following: -- Change the `Resource` in kms part to your own KMS key's arn. +- Change the `Resource` in KMS part to your own KMS key's Amazon Resource Name (ARN). - For S3 buckets in AWS China Regions, make sure to use `arn:aws-cn:kms:::` instead of `arn:aws:kms:::`. 
``` diff --git a/docs/mkdocs.base.yml b/docs/mkdocs.base.yml index a528721..0fbaaa9 100644 --- a/docs/mkdocs.base.yml +++ b/docs/mkdocs.base.yml @@ -1,30 +1,42 @@ site_name: Data Transfer Hub -site_url: 'https://awslabs.github.io/data-transfer-hub/' -repo_url: 'https://github.com/awslabs/data-transfer-hub' +site_url: https://awslabs.github.io/data-transfer-hub/ +repo_url: https://github.com/awslabs/data-transfer-hub +repo_name: awslabs/data-transfer-hub + theme: name: material features: - - tabs + - navigation.tabs - search.suggest - search.highlight favicon: images/aws-solutions.png logo: images/aws-solutions.png - + icon: + repo: fontawesome/brands/github + language: en + +extra: + generator: false + copyright: Copyright © 2020 - 2023 Amazon Web Services + alternate: + - name: English + link: /data-transfer-hub/en/ + lang: en + - name: 简体中文 + link: /data-transfer-hub/zh/ + lang: zh + bucket: solutions-reference + version: latest + plugins: - search - include-markdown - macros - + markdown_extensions: - admonition - attr_list - pymdownx.details - pymdownx.superfences -extra: - alternate: - - name: English - link: /data-transfer-hub/en/ - lang: en - - name: 简体中文 - link: /data-transfer-hub/zh/ - lang: zh-CN + + diff --git a/docs/mkdocs.en.yml b/docs/mkdocs.en.yml index f87b335..ee40b6c 100644 --- a/docs/mkdocs.en.yml +++ b/docs/mkdocs.en.yml @@ -5,27 +5,39 @@ theme: language: en favicon: images/aws-solutions.png logo: images/aws-solutions.png - -nav: - - Welcome: index.md - - Cost: cost.md - - Architecture overview: architecture.md - - Solution components: solution-components.md - - Security: security.md - - Supported regions: regions.md - - CloudFormation template: template.md - - Automated deployment: deployment.md - - Create Amazon S3 transfer task: tutorial-s3.md - - Create Amazon ECR transfer task: tutorial-ecr.md - - Tutorials: - - Transfer S3 object from Alibaba Cloud OSS: tutorial-oss.md - - Transfer S3 object via Direct Connect: tutorial-directconnect.md - - Using AWS CLI to create S3 transfer task: tutorial-cli-launch.md - - Additional resources: additional-resources.md - - Upgrade the solution: upgrade.md - - Uninstall the solution: uninstall.md - - FAQ: faq.md - - Source code: source.md - - Revisions: revisions.md +nav: + - Implementation Guide: + - Solution Overview: + - Overview: solution-overview/index.md + - Features and benefits: solution-overview/features-and-benefits.md + - Use cases: solution-overview/use-cases.md + - Architecture Overview: + - Architecture diagram: architecture-overview/architecture.md + - AWS Well-Architected pillars: architecture-overview/design-considerations.md + - Architecture details: architecture-overview/architecture-details.md + - Plan your deployment: + - Cost: plan-deployment/cost.md + - Security: plan-deployment/security.md + - Supported Regions: plan-deployment/regions.md + - Quotas: plan-deployment/quotas.md + - Deploy the solution: + - Deployment overview: deployment/deployment-overview.md + - AWS CloudFormation template: deployment/template.md + - Deployment: deployment/deployment.md + - Use Data Transfer Hub: + - Create Amazon S3 transfer task: user-guide/tutorial-s3.md + - Create Amazon ECR transfer task: user-guide/tutorial-ecr.md + - Transfer S3 object from Alibaba Cloud OSS: user-guide/tutorial-oss.md + - Transfer S3 object via Direct Connect: user-guide/tutorial-directconnect.md + - Use AWS CLI to create S3 transfer task: user-guide/tutorial-cli-launch.md + - Upgrade the solution: update.md + - Uninstall the solution: uninstall.md 
+ - FAQ: faq.md + - Troubleshooting: troubleshooting.md + - Developer guide: + - Source code: developer-guide/source.md + - Contributors: contributors.md + - Revisions: revisions.md + - Notices: notices.md diff --git a/docs/mkdocs.zh.yml b/docs/mkdocs.zh.yml index 1b55269..03a760d 100644 --- a/docs/mkdocs.zh.yml +++ b/docs/mkdocs.zh.yml @@ -7,28 +7,40 @@ theme: language: zh favicon: images/aws-solutions.png logo: images/aws-solutions.png - nav: + - 实施指南: + - 方案概述: + - 概述: solution-overview/index.md + - 功能和优势: solution-overview/features-and-benefits.md + - 客户用例: solution-overview/use-cases.md + - 架构概览: + - 架构图: architecture-overview/architecture.md + - 架构设计思路: architecture-overview/design-considerations.md + - 架构细节: architecture-overview/architecture-details.md + - 部署前的规划: + - 成本预估: plan-deployment/cost.md + - 安全信息: plan-deployment/security.md + - 区域支持: plan-deployment/regions.md + - 配额信息: plan-deployment/quotas.md + - 部署解决方案: + - 部署概览: deployment/deployment-overview.md + - 模板信息: deployment/template.md + - 部署步骤: deployment/deployment.md + - 用户指南: + - 创建 Amazon S3 传输任务: user-guide/tutorial-s3.md + - 创建 Amazon ECR 传输任务: user-guide/tutorial-ecr.md + - 从阿里云 OSS 传输 S3 对象: user-guide/tutorial-oss.md + - 通过 Direct Connect 传输 S3 对象: user-guide/tutorial-directconnect.md + - 使用 AWS CLI 创建 S3 传输任务: user-guide/tutorial-cli-launch.md + - 更新解决方案: update.md + - 卸载解决方案: uninstall.md + - 常见问题解答: faq.md + - 故障排除: troubleshooting.md + - 开发者指南: + - 源代码: developer-guide/source.md + - 贡献人员: contributors.md + - 文档修订信息: revisions.md + + - - 概述: index.md - - 成本预估: cost.md - - 架构概览: architecture.md - - 方案组件: solution-components.md - - 安全信息: security.md - - 区域支持: regions.md - - 模板信息: template.md - - 部署解决方案: deployment.md - - 创建Amazon S3传输任务: tutorial-s3.md - - 创建Amazon ECR传输任务: tutorial-ecr.md - - 教程: - - 从阿里云 OSS 传输 S3 对象: tutorial-oss.md - - 通过 Direct Connect 传输 S3 对象: tutorial-directconnect.md - - 使用 AWS CLI 创建 S3 传输任务: tutorial-cli-launch.md - - 更多资源: additional-resources.md - - 更新解决方案: upgrade.md - - 卸载解决方案: uninstall.md - - 常见问题解答: faq.md - - 源代码: source.md - - 版本发布说明: revisions.md - diff --git a/docs/zh-base/additional-resources.md b/docs/zh-base/additional-resources.md deleted file mode 100644 index 256af22..0000000 --- a/docs/zh-base/additional-resources.md +++ /dev/null @@ -1,12 +0,0 @@ -- [AWS CloudFormation](https://aws.amazon.com/cloudformation/) -- [Amazon S3](https://aws.amazon.com/s3/) -- [AWS Lambda](https://aws.amazon.com/lambda/) -- [AWS Step Functions](https://aws.amazon.com/step-functions/) -- [Amazon CloudFront](https://aws.amazon.com/cloudfront/) -- [Amazon ECR](https://aws.amazon.com/ecr/) -- [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) -- [AWS AppSync](https://aws.amazon.com/appsync/) -- [Amazon Cognito](https://aws.amazon.com/cognito/) -- [AWS IAM](https://aws.amazon.com/iam/) -- [Amazon EC2](https://aws.amazon.com/ec2/) -- [Amazon Route 53](https://aws.amazon.com/route53/) \ No newline at end of file diff --git a/docs/zh-base/architecture-overview/architecture-details.md b/docs/zh-base/architecture-overview/architecture-details.md new file mode 100644 index 0000000..9fcaf27 --- /dev/null +++ b/docs/zh-base/architecture-overview/architecture-details.md @@ -0,0 +1,56 @@ +本节介绍构成此解决方案的组件和 AWS 服务,以及有关这些组件如何协同工作的架构详细信息。 + +## 这个方案中的 AWS 服务 + +这个方案中包含以下 AWS 服务: + +| AWS 服务 | 描述 | +| --- | --- | +| [Amazon CloudFront](https://aws.amazon.com/cloudfront/) | 核心的。用于提供静态网站资源(前端用户界面)。 | +| [AWS AppSync](https://aws.amazon.com/appsync/) | 核心的。提供后端 API。 | +| [AWS 
Lambda](https://aws.amazon.com/lambda/) | 核心的。用于调用后端 API。 | +| [AWS Step Functions](https://aws.amazon.com/step-functions/) | 支持性的。用于启动、停止或删除 ECR 或 S3 插件模板。 | +| [Amazon ECS](https://aws.amazon.com/cn/ecs/) | 核心的。用于运行插件模板使用的容器镜像。 | +| [Amazon EC2](https://aws.amazon.com/ec2/) | 核心的。用于消费 Amazon SQS 中的消息并将对象从源存储桶传输到目标存储桶。 | +| [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) | 核心的。用于为每个对象存储传输状态的记录。 | +| [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) | 核心的。存储数据传输的凭据。 | +| [Amazon ECR](https://aws.amazon.com/ecr/) | 支持性的。用于托管容器镜像。 | +| [Amazon Cognito](https://aws.amazon.com/cognito/) | 支持性的。用于认证用户(在 AWS 区域内)。 | +| [Amazon S3](https://aws.amazon.com/s3/) | 支持性的。用于存储静态网站资源(前端用户界面)。 | +| [Amazon SQS](https://aws.amazon.com/sqs/) | 支持性的。用作传输任务的临时缓冲区。 | +| [Amazon EventBridge](https://aws.amazon.com/eventbridge/) | 支持性的。用于定期触发传输任务。 | +| [Amazon SNS](https://aws.amazon.com/sns/) | 支持性的。提供数据传输结果的主题和电子邮件订阅通知。 | +| [AWS CloudWatch](https://aws.amazon.com/cloudwatch/) | 支持性的。用于监视数据传输进度。 | + +## **数据传输中心的工作方式** + +这个解决方案有三个组成部分:Web控制台,Amazon S3传输引擎,以及Amazon ECR传输引擎。 + +### **Web控制台** + +这个解决方案提供了一个简单的Web控制台,允许您创建和管理Amazon S3和Amazon ECR的传输任务。 + +### **Amazon S3传输引擎** + +Amazon S3传输引擎运行Amazon S3插件,用于将对象从其来源传输到S3存储桶中。S3插件支持以下功能: + +- 在AWS中国区域和AWS区域之间传输Amazon S3对象 +- 从阿里云OSS/Tencent COS/Qiniu Kodo传输对象到Amazon S3 +- 从S3兼容存储服务传输对象到Amazon S3 +- 支持通过S3事件进行几乎实时的传输 +- 支持传输对象元数据 +- 支持增量数据传输 +- 支持从私有请求存储桶中传输 +- 自动重试和错误处理 + +### **Amazon ECR传输引擎** + +Amazon ECR引擎运行Amazon ECR插件,用于从其他容器注册表传输容器镜像。ECR插件支持以下功能: + +- 在AWS中国区域和AWS区域之间传输Amazon ECR镜像 +- 从公共容器注册表(如Docker Hub、GCR.io、Quay.io)传输到Amazon ECR +- 传输选定的镜像到Amazon ECR +- 从Amazon ECR传输所有镜像和标签 +ECR插件利用[skopeo][skopeo]作为底层引擎。AWS Lambda函数列出其来源中的镜像,并使用Fargate运行传输作业。 + +[skopeo]: https://github.com/containers/skopeo diff --git a/docs/zh-base/architecture.md b/docs/zh-base/architecture-overview/architecture.md similarity index 78% rename from docs/zh-base/architecture.md rename to docs/zh-base/architecture-overview/architecture.md index 5b5e3c0..5b4d284 100644 --- a/docs/zh-base/architecture.md +++ b/docs/zh-base/architecture-overview/architecture.md @@ -1,8 +1,7 @@ -使用默认参数部署数据传输解决方案后,在亚马逊云科技中构建的环境如下图所示。 +下图展示的是使用默认参数部署本解决方案在亚马逊云科技中构建的环境。 -![architecture](./images/arch-global.png) - -图1:解决方案架构 +![architecture](../images/arch-global.png) +*解决方案架构图* 本解决方案在您的亚马逊云科技账户中部署AWS CloudFormation模板并完成以下设置。 @@ -23,20 +22,19 @@ 网页控制台用于集中创建和管理所有数据传输任务。每种数据类型(例如,Amazon S3或Amazon ECR)都是插件,并打包为AWS CloudFormation模板,托管在Amazon S3存储桶中。当您创建传输任务时,AWS Lambda函数会启动AWS CloudFormation模板,并且每个任务的状态都会存储并显示在Amazon DynamoDB表中。 -截至2023年3月,该解决方案支持两个数据传输插件:Amazon S3插件和Amazon ECR插件。 +截至2023年4月,该解决方案支持两个数据传输插件:Amazon S3插件和Amazon ECR插件。 ## Amazon S3插件 -![s3-architecture](./images/s3-arch-global.png) - -图2:Amazon S3插件架构 +![s3-architecture](../images/s3-arch-global.png) +*Amazon S3插件架构图* 使用Amazon S3插件的工作流程如下: 1. Event Bridge规则定时触发AWS Lambda 函数,默认每小时运行一次。 -2. AWS Lambda 将使用启动模板在 EC2 中启动数据比较作业 (JobFinder)。 +2. AWS Lambda 将使用启动模板在 Amazon EC2 中启动数据比较作业 (JobFinder)。 2. 该任务列出源和目标存储桶中的所有对象,进行比较并确定传输对象。 -3. EC2 为每一个需要传输的对象发送一条消息到 Amazon SQS 队列中。同时该方案还支持Amazon S3事件消息,以实现更实时的数据传输;每当有对象上传到源存储桶时,事件消息就会被发送到同一个SQS队列。 -4. 在Amazon EC2中运行的JobWorker使用SQS中的消息,并将对象从源存储桶传输到目标存储桶。该方案将使用Auto Scaling Group来控制EC2实例的数量,并根据业务需要传输数据。 +3. Amazon EC2 为每一个需要传输的对象发送一条消息到 Amazon SQS 队列中。同时该方案还支持Amazon S3事件消息,以实现更实时的数据传输;每当有对象上传到源存储桶时,事件消息就会被发送到同一个 Amazon SQS 队列。 +4. 在Amazon EC2中运行的JobWorker使用 Amazon SQS 中的消息,并将对象从源存储桶传输到目标存储桶。该方案将使用Auto Scaling Group来控制 Amazon EC2 实例的数量,并根据业务需要传输数据。 5. 每个对象的传输状态记录存储在Amazon DynamoDB中。 6. 
Amazon EC2实例将根据SQS消息从源存储桶中获取(下载)对象。 7. Amazon EC2实例将根据SQS消息将对象放入(上传)到目标存储桶。 @@ -47,9 +45,8 @@ ## Amazon ECR插件 -![ecr-architecture](./images/ecr-arch-global.png) - -图3:Amazon ECR插件架构 +![ecr-architecture](../images/ecr-arch-global.png) +*Amazon ECR插件架构图* 使用Amazon ECR插件的工作流程如下: @@ -60,6 +57,7 @@ 5. 每个任务使用[skopeo](https://github.com/containers/skopeo)将镜像复制到目标ECR中。 6. 复制完成后,状态(成功或失败)会记录到Amazon DynamoDB中以进行跟踪。 + [s3]:https://www.amazonaws.cn/s3/?nc1=h_ls [cloudfront]:https://www.amazonaws.cn/cloudfront/?nc1=h_ls [appsync]:https://www.amazonaws.cn/appsync/?nc1=h_ls @@ -70,4 +68,4 @@ [ecs]:https://aws.amazon.com/cn/ecs/ [ecr]:https://aws.amazon.com/cn/ecr/ [dynamodb]:https://www.amazonaws.cn/dynamodb/?nc1=h_ls -[waf]:https://aws.amazon.com/cn/waf/?nc1=h_ls \ No newline at end of file +[waf]:https://aws.amazon.com/waf/ \ No newline at end of file diff --git a/docs/zh-base/architecture-overview/design-considerations.md b/docs/zh-base/architecture-overview/design-considerations.md new file mode 100644 index 0000000..54cfde0 --- /dev/null +++ b/docs/zh-base/architecture-overview/design-considerations.md @@ -0,0 +1,49 @@ +这个解决方案是根据 [AWS Well-Architected Framework][well-architected-framework] 中的最佳实践设计的,该框架帮助客户在云中设计和操作可靠、安全、高效和经济实惠的工作负载。 + +本节描述了在构建此解决方案时,应用了 Well-Architected Framework 的设计原则和最佳实践。 + +## 卓越运营 +本节描述了在设计此解决方案时,应用了 [卓越运营支柱][operational-excellence-pillar] 的原则和最佳实践。 + +数据传输解决方案在各个阶段将指标推送到 Amazon CloudWatch,以提供对基础设施的可观察性,Lambda 函数、Amazon EC2 传输工作程序、Step Function 工作流和其余解决方案组件。将数据传输错误添加到 Amazon SQS 队列以进行重试和警报。 + +## 安全性 +本节描述了在设计此解决方案时,应用了 [安全性支柱][security-pillar] 的原则和最佳实践。 + +- Data Transfer Hub 网页控制台用户使用 Amazon Cognito 进行身份验证和授权。 +- 所有服务间通信都使用 AWS IAM 角色。 +- 解决方案使用的所有角色都遵循最小权限访问原则。也就是说,它只包含所需的最小权限,以便服务能够正常运行。 + +## 可靠性 +本节描述了在设计此解决方案时,应用了 [可靠性支柱][reliability-pillar] 的原则和最佳实践。 + +- 在可能的情况下使用 AWS Serverless Services (例如 Lambda、Step Functions、Amazon S3 和 Amazon SQS),以确保高可用性和从服务故障中恢复。 +- 数据存储在 DynamoDB 和 Amazon S3 中,因此默认情况下会在多个可用区域 (AZs)中持久存在。 + +## 性能效率 +本节描述了在设计此解决方案时,应用了 [性能效率支柱][performance-efficiency-pillar] 的原则和最佳实践。 + +- 能够在支持此解决方案的 AWS 服务的任何区域中启动此解决方案,例如:AWS Lambda、AWS S3、Amazon SQS、Amazon DynamoDB 和 Amazon EC2。 +- 每天自动测试和部署此解决方案。由解决方案架构师和专业主题专家审查此解决方案以寻找实验和改进的领域。 + +## 成本优化 + +本节介绍在设计此解决方案时如何应用[成本优化支柱][cost-optimization-pillar]的原则和最佳实践。 + +- 使用 Autoscaling Group,以使计算成本仅与传输的数据量有关。 +- 使用 Amazon SQS 和 DynamoDB 等无服务器服务,以使客户仅按其使用付费。 + +## 可持续性 + +本节介绍了在设计此解决方案时如何应用[可持续性支柱][sustainability-pillar]的原则和最佳实践。 + +- 该解决方案的无服务器设计(使用Lambda、Amazon SQS和DynamoDB)和使用托管服务(如Amazon EC2)旨在减少碳足迹,与持续运行本地服务器的碳足迹相比。 + + +[well-architected-framework]:https://aws.amazon.com/architecture/well-architected/?wa-lens-whitepapers.sort-by=item.additionalFields.sortDate&wa-lens-whitepapers.sort-order=desc&wa-guidance-whitepapers.sort-by=item.additionalFields.sortDate&wa-guidance-whitepapers.sort-order=desc +[operational-excellence-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/operational-excellence-pillar/welcome.html +[security-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/welcome.html +[reliability-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/welcome.html +[performance-efficiency-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/performance-efficiency-pillar/welcome.html +[cost-optimization-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/cost-optimization-pillar/welcome.html +[sustainability-pillar]:https://docs.aws.amazon.com/wellarchitected/latest/sustainability-pillar/sustainability-pillar.html \ No newline at end of file diff 
--git a/docs/zh-base/contributors.md b/docs/zh-base/contributors.md new file mode 100644 index 0000000..44aff0a --- /dev/null +++ b/docs/zh-base/contributors.md @@ -0,0 +1,7 @@ +- Aiden Dai +- Eva Liu +- Kervin Hu +- Haiyun Chen +- Joe Shi +- Ashwini Rudra +- Jyoti Tyagi diff --git a/docs/zh-base/deployment/deployment-overview.md b/docs/zh-base/deployment/deployment-overview.md new file mode 100644 index 0000000..37ec2ad --- /dev/null +++ b/docs/zh-base/deployment/deployment-overview.md @@ -0,0 +1,12 @@ +以下是在AWS上部署此解决方案的步骤。有关详细说明,请按照每个步骤的链接进行操作。 + +在启动解决方案之前,请[查看本指南中讨论的费用](../../plan-deployment/cost)、架构、网络安全和其他注意事项。按照本节中的逐步说明配置和部署解决方案到您的帐户中。 + +**部署时间**:大约需要 15 分钟 + +- 第 1 步. 启动堆栈 + - [(选项 1) 在 AWS 区域中部署 AWS CloudFormation 模板](../deployment/#launch-cognito) + - [(选项 2) 在 AWS 中国区域中部署 AWS CloudFormation 模板](../deployment/#launch-openid) + +- 第 2 步. [启动 Web 控制台](../deployment/#launch-web-console) +- 第 3 步. [创建传输任务](../deployment/#create-task) \ No newline at end of file diff --git a/docs/zh-base/deployment.md b/docs/zh-base/deployment/deployment.md similarity index 84% rename from docs/zh-base/deployment.md rename to docs/zh-base/deployment/deployment.md index 55da6ec..d748422 100644 --- a/docs/zh-base/deployment.md +++ b/docs/zh-base/deployment/deployment.md @@ -1,15 +1,8 @@ -在部署解决方案之前,建议您先查看本指南中有关架构图和区域支持等信息,然后按照下面的说明配置解决方案并将其部署到您的帐户中。 +在部署解决方案之前,建议您先查看本指南中有关架构图和区域支持等信息。然后按照下面的说明配置解决方案并将其部署到您的帐户中。 **部署时间**:约15分钟 -## 部署概述 -您可以在亚马逊云科技上部署并使用解决方案,过程如下: - -- 步骤1:启动堆栈 - - [(选项 1)从全球区域启动堆栈](#launch-cognito) - - [(选项 2)从中国区域启动堆栈](#launch-openid) -- 步骤2:[访问网页控制台](#launch-web-console) -- 步骤3:[创建数据传输任务](#create-task) +## 部署步骤 ## 步骤1.(选项1)从全球区域启动堆栈 @@ -22,16 +15,16 @@ !!! note "注意" 您需要承担运行数据传输解决方案时使用亚马逊云科技各项服务的成本费用。想要了解详细信息,请参阅本实施指南中的成本章节,以及解决方案中使用的每项服务的定价页面。 - + 1. 登录到Amazon Web Services管理控制台,选择按钮以启动 `DataTransferHub-cognito.template` 模板。 您还可以选择直接[下载模板](https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template)进行部署。 - [![Launch Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template) + [![Launch Stack](../images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-cognito.template) 2. 默认情况下,该模板将在您登录控制台后默认的区域启动,即美国东部(弗吉尼亚北部)区域。若需在指定的区域中启动该解决方案,请在控制台导航栏中的区域下拉列表中选择。 3. 在**创建堆栈**页面上,确认Amazon S3 URL文本框中显示正确的模板URL,然后选择**下一步**。 -4. 在**指定堆栈详细信息**页面上,为您的解决方案堆栈分配一个账户内唯一且符合命名要求的名称。有关命名字符限制的信息,请参阅*AWS Identity and Access Management用户指南*中的[IAM 和 STS 限制](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)。 +4. 在**指定堆栈详细信息**页面上,为您的解决方案堆栈分配一个账户内唯一且符合命名要求的名称。有关命名字符限制的信息,请参阅*AWS Identity and Access Management用户指南*中的[IAM 和 AWS STS 配额](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)。 5. 在**参数**部分,查看此解决方案模板的参数并根据需要进行修改。 | 参数名称 | 默认值 | 描述 | @@ -69,13 +62,13 @@ 5. 单击**创建自建应用**按钮。 6. 输入**应用名称**和**认证地址**。 7. 将Endpoint Information中的`App ID`(即`client_id`)和`Issuer`保存到一个文本文件中,以备后面使用。 - [![](./images/OIDC/endpoint-info.png)](./images/OIDC/endpoint-info.png) + [![](../images/OIDC/endpoint-info.png)](../images/OIDC/endpoint-info.png) 8. 
将`Login Callback URL`和`Logout Callback URL`更新为IPC记录的域名。 - [![](./images/OIDC/authentication-configuration.png)](./images/OIDC/authentication-configuration.png) + [![](../images/OIDC/authentication-configuration.png)](../images/OIDC/authentication-configuration.png) 9. 设置以下授权配置。 - [![](./images/OIDC/authorization-configuration.png)](./images/OIDC/authorization-configuration.png) + [![](../images/OIDC/authorization-configuration.png)](../images/OIDC/authorization-configuration.png) 10. 更新登录控制。 1. 从左侧边栏选择并进入**应用**界面,选择**登录控制**,然后选择**登录注册方式**。 @@ -122,11 +115,11 @@ 1. 登录到Amazon Web Services管理控制台,选择按钮以启动 `DataTransferHub-openid.template` 模板。您还可以选择直接[下载模板](https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template)进行部署。 - [![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template) + [![Launch Stack](../images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DataTransferHub&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferHub-openid.template) 2. 模板将在您的默认区域启动。要在不同的区域中启动解决方案,请使用控制台导航栏中的区域选择器。 3. 在**创建堆栈**页面上,确认Amazon S3 URL文本框中显示正确的模板URL,然后选择**下一步**。 -4. 在**指定堆栈详细信息**页面上,为您的解决方案堆栈分配一个账户内唯一且符合命名要求的名称。有关命名字符限制的信息,请参阅*AWS Identity and Access Management用户指南*中的[IAM 和 STS 限制](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)。 +4. 在**指定堆栈详细信息**页面上,为您的解决方案堆栈分配一个账户内唯一且符合命名要求的名称。有关命名字符限制的信息,请参阅*AWS Identity and Access Management用户指南*中的[IAM 和 AWS STS 配额](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)。 5. 在**参数**部分,查看此解决方案模板的参数并根据需要进行修改。 | 参数名称 | 默认值 | 描述 | @@ -168,10 +161,10 @@ 2. 输入您在部署解决方案时注册的用户名和密码,然后选择登录。系统将打开网页控制台。 3. 更改您的密码,然后重新登录。 ## 步骤3:创建数据传输任务 -您可以选择为Amazon S3或Amazon ECR创建传输任务。更多信息请参考[创建Amazon S3传输任务](./tutorial-s3.md)和[创建Amazon ECR传输任务](./tutorial-ecr.md)。 +您可以选择为Amazon S3或Amazon ECR创建传输任务。更多信息请参考[创建Amazon S3传输任务](../user-guide/tutorial-s3.md)和[创建Amazon ECR传输任务](../user-guide/tutorial-ecr.md)。 -![dth-console](./images/dth-console.png) +![dth-console](../images/dth-console.png) -图1:网页控制台 +*网页控制台* [icp]: https://www.amazonaws.cn/en/support/icp/?nc2=h_l2_su diff --git a/docs/zh-base/template.md b/docs/zh-base/deployment/template.md similarity index 100% rename from docs/zh-base/template.md rename to docs/zh-base/deployment/template.md diff --git a/docs/zh-base/source.md b/docs/zh-base/developer-guide/source.md similarity index 100% rename from docs/zh-base/source.md rename to docs/zh-base/developer-guide/source.md diff --git a/docs/zh-base/faq.md b/docs/zh-base/faq.md index eeb7757..01d5f50 100644 --- a/docs/zh-base/faq.md +++ b/docs/zh-base/faq.md @@ -3,7 +3,7 @@ **1. 哪些亚马逊云科技区域可以部署该方案?**
-请参考[区域支持](./regions.md)。 +请参考[区域支持](../plan-deployment/regions)。 **2. 创建传输任务时,建议部署在数据源端还是目标端?**
@@ -34,7 +34,7 @@ **7. 能否使用 AWS CLI 创建 DTH S3 传输任务?**
-可以。请参考[使用AWS CLI启动DTH S3 Transfer任务](./tutorial-cli-launch.md)指南。 +可以。请参考[使用AWS CLI启动DTH S3 Transfer任务](../user-guide/tutorial-cli-launch)指南。 ## 性能相关问题 @@ -86,7 +86,7 @@ Auto Scaling Group 的大小会根据 SQS 中的任务数量[自动放大或缩 **5. 支持SSE-S3,SSE-KMS,SSE-CMK吗?**
-是的。支持使用SSE-S3和SSE-KMS的数据源。如果您的源存储桶启用了SSE-CMK,请参考[教程](../tutorial-s3/#kms-amazon-s3-s3)。 +是的。支持使用SSE-S3和SSE-KMS的数据源。如果您的源存储桶启用了SSE-CMK,请参考[教程](../user-guide/tutorial-s3/#how-to-transfer-s3-object-from-kms-encrypted-amazon-s3)。 ## 功能相关问题 @@ -151,35 +151,6 @@ Auto Scaling Group 的大小会根据 SQS 中的任务数量[自动放大或缩 **10. 如何处理Access Key 轮换?**
目前,当 Data Transfer Hub 感知到 S3 的 Access Key 被轮换时,它会自动从 AWS Secrets Manager 中获取最新的密钥。 因此,Access Key Rotation 不会影响DTH 的迁移过程。 -## 错误消息列表 - -**1. StatusCode: 400, InvalidToken: The provided token is malformed or otherwise invalid** - -如果您收到此错误消息,请确认您的 Secret 配置为以下格式,建议您通过复制粘贴的方式创建。 - -```json -{ - "access_key_id": "", - "secret_access_key": "" -} -``` - -**2. StatusCode: 403, InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records** - -如果您收到此错误消息,请检查您的存储桶名称和区域名称是否配置正确。 - -**3. StatusCode: 403, InvalidAccessKeyId: UnknownError** - -请检查Secrets Manager中存放的Credential是否具有应有的权限,具体可参考[IAM Policy](https://github.com/awslabs/data-transfer-hub/blob/v2.0.0/docs/IAM-Policy.md)。 - -**4. StatusCode: 400, AccessDeniedException: Access to KMS is not allowed** - -如果您收到此错误消息,请确认您的密钥没有被SSE-CMK加密。目前,DTH不支持被SSE-CMK加密的密钥。 - -**5. dial tcp: lookup xxx.xxxxx.xxxxx.xx (http://xxx.xxxxx.xxxxx.xx/) on xxx.xxx.xxx.xxx:53: no such host** - -如果您收到此错误消息,请检查您的端点URL是否配置正确。 - ## 其它相关问题 **1. 集群节点(EC2)被失误Terminate了,如何解决?**
@@ -215,11 +186,11 @@ Auto Scaling Group 的大小会根据 SQS 中的任务数量[自动放大或缩 **5. 部署完成后,为什么在两个CloudWatch日志组中找不到任何日志流?**
-这是因为您在部署此解决方案时选择的子网没有公共网络访问权限,因此Fargate任务无法拉取映像,并且EC2无法下载CloudWatch 代理将日志发送到CloudWatch。请检查您的VPC设置。解决问题后,您需要通过此解决方案手动终止正在运行的EC2实例(如果有的话)。随后,弹性伸缩组会自动启动新的实例。 +这是因为您在部署此解决方案时选择的子网没有公共网络访问权限,因此Fargate任务无法拉取镜像,并且EC2无法下载CloudWatch 代理将日志发送到CloudWatch。请检查您的VPC设置。解决问题后,您需要通过此解决方案手动终止正在运行的EC2实例(如果有的话)。随后,弹性伸缩组会自动启动新的实例。 **6. 如何在此解决方案中使用TLSv1.2_2021或更高版本?**
-在部署数据传输中心解决方案后,请前往 [CloudFront控制台](https://us-east-1.console.aws.amazon.com/cloudfront/v3/home#/distributions) 配置安全策略。您需要准备一个域名和相应的TLS证书,从而可以实现更加安全的TLS配置。 +在部署数据传输解决方案后,请前往 [CloudFront控制台](https://us-east-1.console.aws.amazon.com/cloudfront/v3/home#/distributions) 配置安全策略。您需要准备一个域名和相应的TLS证书,从而可以实现更加安全的TLS配置。 [crr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#crr-scenario [asg_scale]: https://docs.aws.amazon.com/zh_cn/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-steps \ No newline at end of file diff --git a/docs/zh-base/index.md b/docs/zh-base/index.md deleted file mode 100644 index 2cb37e5..0000000 --- a/docs/zh-base/index.md +++ /dev/null @@ -1,20 +0,0 @@ -数据传输解决方案(Data Transfer Hub)允许客户在亚马逊云科技中国区域和全球区域之间传输Amazon S3对象和Amazon ECR镜像,从而帮助客户拓展全球业务。本解决方案安全、可靠、可扩展、可追踪,并通过网页控制台提供一致的用户体验。 - -本解决方案通过网页控制台支持客户完成以下任务: - -- 在亚马逊云科技中国区域和和全球区域之间传输Amazon S3对象 -- 将数据从其它云服务商的对象存储服务(包括阿里云OSS,腾讯COS,七牛Kodo以及兼容Amazon S3的云存储服务)传输到Amazon S3 -- 在亚马逊云科技中国区域和和全球区域之间传输Amazon ECR镜像 -- 将容器镜像从公共容器镜像仓库(例如,Docker Hub,Google gcr.io,Red Hat Quay.io)传输到Amazon ECR - -!!! note "注意" - - 如果您需要在不同的亚马逊云科技全球区域之间传输Amazon S3对象,建议您使用[跨区域复制][crr]; 如果您想在同一个亚马逊云科技全球区域内传输Amazon S3对象,建议您使用[同区域复制][srr]。 - -本实施指南介绍在Amazon Web Services(亚马逊云科技)云中部署数据传输解决方案的架构信息和具体配置步骤。它包含指向[CloudFormation][cloudformation]模板的链接,这些模板使用亚马逊云科技在安全性和可用性方面的最佳实践来启动和配置本解决方案所需的亚马逊云科技服务。 - -本指南面向具有亚马逊云科技架构实践经验的IT架构师、开发人员、运维人员等专业人士。 - -[cloudformation]: https://aws.amazon.com/en/cloudformation/ -[crr]: https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/userguide/replication.html#crr-scenario -[srr]: https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/userguide/replication.html#srr-scenario \ No newline at end of file diff --git a/docs/zh-base/cost.md b/docs/zh-base/plan-deployment/cost.md similarity index 94% rename from docs/zh-base/cost.md rename to docs/zh-base/plan-deployment/cost.md index 77c4ef3..9d5edc2 100644 --- a/docs/zh-base/cost.md +++ b/docs/zh-base/plan-deployment/cost.md @@ -15,7 +15,7 @@ - 每个EC2实例运行的平均速度: 1GB/min - EC2实例运行的总时长: 17小时 - 截至2023年3月,使用解决方案完成该传输任务的成本费用如下表所示: + 截至2023年4月,使用解决方案完成该传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| @@ -35,7 +35,7 @@ - 每个EC2实例运行的平均速度: 约6MB/分钟(约每秒10个文件) - EC2实例运行的总时长: 约3000小时 -截至2022年7月,使用解决方案完成传输任务的成本费用如下表所示: +截至2023年4月,使用解决方案完成传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| @@ -55,7 +55,7 @@ 将27个Amazon ECR镜像(总大小约3GB)从欧洲(爱尔兰)区域(eu-west-1)传输到由光环新网运营的亚马逊云科技中国(北京)区域(cn-north-1),运行总时长约为6分钟。 -截至2023年3月,使用解决方案完成传输任务的成本费用如下表所示: +截至2023年4月,使用解决方案完成传输任务的成本费用如下表所示: | 服务 | 用量 | 费用 | |----------|--------|--------| diff --git a/docs/zh-base/plan-deployment/quotas.md b/docs/zh-base/plan-deployment/quotas.md new file mode 100644 index 0000000..15df1ac --- /dev/null +++ b/docs/zh-base/plan-deployment/quotas.md @@ -0,0 +1,9 @@ +### 本解决方案中 AWS 服务的配额 + +请确保您拥有足够的配额来使用本解决方案中实现的[每个服务](../../architecture-overview/architecture-details/#aws-services-in-this-solution)。有关更多信息,请参阅[AWS 服务配额](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)。 + +选择以下链接之一以转到该服务的页面。要在不切换页面的情况下查看文档中所有 AWS 服务的服务配额,请查看 PDF 中的[服务端点和配额](https://docs.aws.amazon.com/general/latest/gr/aws-general.pdf#aws-service-information)。 + +### AWS CloudFormation 配额 + +在启动此解决方案时,您的 AWS 帐户具有 AWS CloudFormation 配额,您在了解这些配额后,可以避免出现限制错误,这些错误会阻止您成功部署此解决方案。有关更多信息,请参阅 *AWS CloudFormation 用户指南* 中的[AWS CloudFormation 配额](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html)。 diff --git a/docs/zh-base/regions.md b/docs/zh-base/plan-deployment/regions.md 
similarity index 100% rename from docs/zh-base/regions.md rename to docs/zh-base/plan-deployment/regions.md diff --git a/docs/zh-base/plan-deployment/security.md b/docs/zh-base/plan-deployment/security.md new file mode 100644 index 0000000..1a32d33 --- /dev/null +++ b/docs/zh-base/plan-deployment/security.md @@ -0,0 +1,12 @@ +当您在亚马逊云科技基础设施上构建解决方案时,安全责任由您和亚马逊云科技共同承担。此[共享模型](https://aws.amazon.com/compliance/shared-responsibility-model/)减少了您的操作负担,这是由于亚马逊云科技操作、管理和控制组件,包括主机操作系统、虚拟化层以及服务运行所在设施的物理安全性。有关亚马逊云科技安全的更多信息,请访问亚马逊云科技[云安全](http://aws.amazon.com/security/)。 + +### IAM角色 + +亚马逊云科技身份和访问管理(IAM)角色允许客户为亚马逊云科技上的服务和用户分配细粒度访问策略和权限。此解决方案创建IAM角色,这些角色授予解决方案各组件间的访问权限。 + +### Amazon CloudFront + +该解决方案部署了一个托管在 Amazon Simple Storage Service (Amazon S3) 存储桶中的Web控制台。为了减少延迟并提高安全性,该解决方案包括一个Amazon CloudFront分发,并带有一个来源访问身份验证,这是一个CloudFront用户,为解决方案的网站存储桶内容提供公共访问。有关更多信息,请参阅《Amazon CloudFront开发人员指南》中的[通过使用来源访问身份验证来限制对Amazon S3内容的访问](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html)。 + + + diff --git a/docs/zh-base/revisions.md b/docs/zh-base/revisions.md index 0e640b9..369e942 100644 --- a/docs/zh-base/revisions.md +++ b/docs/zh-base/revisions.md @@ -4,4 +4,5 @@ | 2021年7月 | 发布版本2.0
1. 支持通用OIDC提供商,包括Authing、Auth0、okta等
2. 支持从更多Amazon S3兼容的存储服务中传输对象,如华为云OBS等
3. 支持设置目标桶对象的访问控制列表(ACL)
4. 支持在账户A中部署,将数据从账户B复制到账户C
5. 更改为使用Graviton 2实例,并开启BBR来传输S3对象以提高性能并节省成本
6. 更改为使用Secrets Manager来维护凭证信息 | | 2021年12月 | 发布版本2.1
1. 支持自定义Prefix列表过滤传输任务
2. 支持配置单次运行的文件传输任务
3. 支持通过自定义CRON表达式配置任务时间表
4. 支持自定义开启或关闭数据比对功能 | | 2022年7月 | 发布版本2.2
1. 支持通过 Direct Connect 传输数据| -| 2023年3月 | 发布版本2.3
1. 支持嵌入式仪表板和监控日志
2. 支持S3 Access Key 自动轮换
3. 增强一次性传输任务监控| \ No newline at end of file +| 2023年3月 | 发布版本2.3
1. 支持嵌入式仪表板和监控日志
2. 支持S3 Access Key 自动轮换
3. 增强一次性传输任务监控| +| 2023年4月 | 发布版本2.4
1. 支持请求者付费模式| \ No newline at end of file diff --git a/docs/zh-base/security.md b/docs/zh-base/security.md deleted file mode 100644 index b80879a..0000000 --- a/docs/zh-base/security.md +++ /dev/null @@ -1,8 +0,0 @@ -当您在亚马逊云科技基础设施上构建解决方案时,安全责任由您和亚马逊云科技共同承担。此[责任共担模型](https://aws.amazon.com/compliance/shared-responsibility-model/)减少了您的操作负担,这是由于亚马逊云科技操作、管理和控制组件,包括主机操作系统、虚拟化层以及服务运行所在设施的物理安全性。有关亚马逊云科技安全的更多信息,请访问亚马逊云科技[云安全](http://aws.amazon.com/security/)。 - -## IAM角色 - -亚马逊云科技IAM角色允许客户为亚马逊云科技上的服务和用户分配细粒度访问策略和权限。此解决方案创建IAM角色,这些角色授予解决方案各组件间的访问权限。 -## Amazon CloudFront - -此解决方案部署托管在Amazon Simple Storage Service (Amazon S3) 存储桶中的Web控制台。为了帮助减少延迟和提高安全性,该解决方案包括一个带有源访问身份的Amazon CloudFront distribution,该身份是一个CloudFront用户,提供对解决方案网站存储桶内容的公共访问。更多有关信息,请参阅[Amazon CloudFront开发人员指南中的使用源访问身份限制对Amazon S3内容的访问](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html)。 \ No newline at end of file diff --git a/docs/zh-base/solution-components.md b/docs/zh-base/solution-components.md deleted file mode 100644 index 0e13bd1..0000000 --- a/docs/zh-base/solution-components.md +++ /dev/null @@ -1,30 +0,0 @@ -本解决方案包含三个组件: - -- 网页控制台 -- Amazon S3传输引擎 -- Amazon ECR传输引擎 - -## 网页控制台 - -解决方案提供网页控制台,便于创建和管理Amazon S3和 Amazon ECR传输任务。 - -## Amazon S3传输引擎 -Amazon S3传输引擎运行Amazon S3插件,用于将对象从数据源传输到S3存储桶。S3插件支持以下功能: - -- 在中国区域和全球区域之间传输Amazon S3对象 -- 将对象从阿里云OSS/腾讯COS/七牛Kodo传输到Amazon S3 -- 将对象从S3兼容存储服务传输到Amazon S3 -- 支持通过S3 Event准实时的传输 -- 支持传输对象元数据 -- 支持增量数据传输 -- 自动重试和错误处理 - -## Amazon ECR传输引擎 -Amazon ECR引擎运行Amazon ECR插件并用于从其他容器注册表传输容器镜象。ECR插件支持以下功能: - -- 在中国区域和全球区域之间传输Amazon ECR镜像 -- 从公共容器仓库(例如Docker Hub、GCR.io、Quay.io)传输到Amazon ECR -- 将指定的镜像传输到Amazon ECR -- 从Amazon ECR传输所有的镜像和标签 - -ECR插件利用[skopeo](https://github.com/containers/skopeo)作为底层引擎。AWS Lambda函数在其源中列出图像并使用Fargate运行传输任务。 \ No newline at end of file diff --git a/docs/zh-base/solution-overview/features-and-benefits.md b/docs/zh-base/solution-overview/features-and-benefits.md new file mode 100644 index 0000000..de886ba --- /dev/null +++ b/docs/zh-base/solution-overview/features-and-benefits.md @@ -0,0 +1,14 @@ +该解决方案的网页控制台提供了以下任务的管理界面: + +- 在AWS中国区域和AWS区域之间传输Amazon S3对象 +- 将数据从其他云服务提供商的对象存储服务(包括阿里云OSS、腾讯COS和七牛Kodo)传输到Amazon S3 +- 从Amazon S3兼容的对象存储服务传输对象到Amazon S3 +- 在AWS中国区域和AWS区域之间传输Amazon ECR镜像 +- 将容器镜像从公共容器镜像仓库(例如Docker Hub、Google gcr.io、Red Hat Quay.io)传输到Amazon ECR + +!!! note "注意" + + 如果您需要在AWS区域之间传输Amazon S3对象,我们建议使用[跨区域复制][crr]。如果您想在同一AWS区域内传输Amazon S3对象,我们建议使用[同区域复制][srr]。 + +[crr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#crr-scenario +[srr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#srr-scenario diff --git a/docs/zh-base/solution-overview/index.md b/docs/zh-base/solution-overview/index.md new file mode 100644 index 0000000..8e09952 --- /dev/null +++ b/docs/zh-base/solution-overview/index.md @@ -0,0 +1,17 @@ +**数据传输解决方案**(Data Transfer Hub)为 Amazon Simple Storage Service(Amazon S3)对象和 Amazon Elastic Container Registry(Amazon ECR)镜像提供安全、可扩展和可跟踪的数据传输。该数据传输帮助客户通过轻松地在亚马逊云科技(Amazon Web Services,AWS)中国区域内进出数据来拓展其全球业务。 + +本实施指南提供了数据传输解决方案的概述、参考架构和组件、规划部署的注意事项以及将数据传输解决方案部署到 AWS 云的配置步骤。 + +您可以使用以下表格快速导航至相关内容: + +| 如果您想要... | 请阅读... 
| +|----------|--------| +| 了解运行此解决方案的成本 | [成本](../plan-deployment/cost) | +| 理解此解决方案的安全注意事项 | [安全](../plan-deployment/security) | +| 知道如何为此解决方案计划配额 | [配额](../plan-deployment/quotas) | +| 知道此解决方案支持哪些 AWS 区域 | [支持的 AWS 区域](../plan-deployment/regions) | +| 查看或下载此解决方案中包含的 AWS CloudFormation 模板以自动部署此解决方案的基础设施资源(“堆栈”) | [AWS CloudFormation 模板](../deployment/template) | + +本指南适用于在 AWS 云中进行实际架构工作的 IT 架构师、开发人员、DevOps、数据分析师和市场技术专业人员。 + +您将负责遵守与您的数据传输任务相关的所有适用法律。 diff --git a/docs/zh-base/solution-overview/use-cases.md b/docs/zh-base/solution-overview/use-cases.md new file mode 100644 index 0000000..0c542dc --- /dev/null +++ b/docs/zh-base/solution-overview/use-cases.md @@ -0,0 +1,10 @@ +当今的中国市场是世界上最大的市场之一。许多国际公司正在寻求在中国取得成功,同样,一些中国公司也在全球范围内扩展业务。在业务中有很重要的一步是对数据的移动。 + +虽然 S3 跨区域复制和 ECR 跨区域复制非常流行,但客户无法使用它们将数据复制到中国区域。随着 Data Transfer Hub 解决方案的推出,客户现在可以在 Web 门户中创建 AWS 区域和 AWS 中国区域之间的 S3 和 ECR 数据传输任务。此外,它还支持将数据从云提供商复制到 AWS。 + +Data Transfer Hub 支持以下用例: + +* 在 AWS 区域和 AWS 中国区域之间复制 Amazon S3 对象。 +* 将其他云提供商的对象存储服务(包括阿里云 OSS、腾讯 COS、七牛 Kodo)的数据复制到 Amazon S3。 +* 在 AWS 区域和 AWS 中国区域之间传输 Amazon ECR 镜像。 +* 将来自公共 Docker 容器镜像仓库(例如 Docker Hub、Google gcr.io、Red Hat Quay.io)的 Docker 镜像传输到 Amazon ECR。 diff --git a/docs/zh-base/troubleshooting.md b/docs/zh-base/troubleshooting.md new file mode 100644 index 0000000..7c30a2a --- /dev/null +++ b/docs/zh-base/troubleshooting.md @@ -0,0 +1,26 @@ +**1. StatusCode: 400, InvalidToken: The provided token is malformed or otherwise invalid** + +如果您收到此错误消息,请确认您的 Secret 配置为以下格式,建议您通过复制粘贴的方式创建。 + +```json +{ + "access_key_id": "", + "secret_access_key": "" +} +``` + +**2. StatusCode: 403, InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records** + +如果您收到此错误消息,请检查您的存储桶名称和区域名称是否配置正确。 + +**3. StatusCode: 403, InvalidAccessKeyId: UnknownError** + +请检查Secrets Manager中存放的Credential是否具有应有的权限,具体可参考[IAM Policy](https://github.com/awslabs/data-transfer-hub/blob/v2.0.0/docs/IAM-Policy.md)。 + +**4. StatusCode: 400, AccessDeniedException: Access to KMS is not allowed** + +如果您收到此错误消息,请确认您的密钥没有被SSE-CMK加密。目前,DTH不支持被SSE-CMK加密的密钥。 + +**5. dial tcp: lookup xxx.xxxxx.xxxxx.xx (http://xxx.xxxxx.xxxxx.xx/) on xxx.xxx.xxx.xxx:53: no such host** + +如果您收到此错误消息,请检查您的端点URL是否配置正确。 diff --git a/docs/zh-base/tutorial-directconnect.md b/docs/zh-base/tutorial-directconnect.md deleted file mode 100644 index dcdf53e..0000000 --- a/docs/zh-base/tutorial-directconnect.md +++ /dev/null @@ -1,254 +0,0 @@ -本教程介绍如何通过 Direct Connect (DX) 运行 Data Transfer Hub (DTH)。 - -DTH Worker Node 和 Finder Node 启动时,默认需要从 Internet 下载相关文件(如 CloudWatch 代理、DTH CLI)。在隔离场景下,您需要手动将文件下载并上传到 DTH 所在区域的 S3 存储桶中。 - -使用 DTH 通过 DX 传输数据有两种方式: - -- [在非隔离网络环境下通过Direct Connect使用DTH进行数据传输](#default-network) -- [在隔离网络环境下通过Direct Connect使用DTH进行数据传输](#isolated-network) - -## 在非隔离网络环境下通过Direct Connect使用DTH进行数据传输 - -在这种情况下,DTH 部署在**目标端**,并在一个具有**公共访问权限**(具有 Internet 网关或 NAT)的 VPC 内。数据源桶在隔离的网络环境中。 - -!!! note "说明" - - 由于 DTH 部署 VPC 具有公共 Internet 访问权限(IGW 或 NAT),EC2 Worker/Finder器节点可以访问 DTH 使用的其他 AWS 服务,例如Secrets Manager等,并从 Internet 下载相关资源(例如 CloudWatch 代理、DTH CLI),从而无需任何其他手动操作。 - -1. 从**创建传输任务**页面,选择**创建新任务**,然后选择**下一步**。 -2. 在**引擎选项**页面的引擎下,选择**Amazon S3**,然后选择**下一步**。 -3. 指定传输任务详细信息。 - - 在**源类型**下,选择**Amazon S3 Compatible Storage**。 - -4. 输入 **endpoint url**, 该参数必须填写接口端点url,如 `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`。您可以在[VPC 终端节点 控制台](https://us-east-1.console.aws.amazon.com/vpc/home?region=us-west-2#Endpoints:vpcEndpointType=Interface) 的 DNS 名称部分找到对应的url. - -5. 
输入**存储桶名称**,并选择同步**整个存储桶**或**指定前缀的对象**或**多个指定前缀的对象**。 - -6. 设置目标端S3存储桶信息。 - -7. 在**引擎设置**中,验证信息,并在必要时修改信息。如果要进行增量数据传输,建议将**最小容量**设置为至少为1的值。 - -8. 在**任务调度设置**处,选择您的任务调度配置。 - - 如果要以固定频率配置定时任务,以实现定时对比两侧的数据差异,请选择**Fixed Rate**。 - - 如果要通过[Cron Expression](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions)配置定时任务,以实现定时对比两侧的数据差异,请选择**Cron Expression**。 - - 如果只想执行一次数据同步任务,请选择**One Time Transfer**。 - -9. 在**高级选项**中,保留默认值。 -10. 在**是否需要数据比对**处,选择您的任务配置。 - - 如果要跳过数据对比过程,传输所有文件,请选择**No**。 - - 如果只想同步有差异的文件,请选择**Yes**。 - -11. 在**通知邮箱**中提供电子邮件地址。 - -12. 选择**下一步**并查看您的任务参数详细信息。 - -13. 选择**创建任务**。 - -## 在隔离网络环境下通过Direct Connect使用DTH进行数据传输 -在这种情况下,DTH 部署在**目标侧**,并且在一个**没有公共访问权限**VPC内(隔离 VPC)。同时数据源桶也在一个隔离的网络环境中。 - -### 准备工作 -**配置VPC的相关Endpoint** - -DTH worker/finder 节点还需要访问其他 AWS 服务。为此,请为 **DynamoDB** 和 **S3** 创建 **Gateway Endpoint**,为 **logs**、**SQS** 和 **Secret Managers** 创建**Interface Endpoint**。 - -![endpoints](./images/dx-vpc-endpoints.png) - -**将所需文件上传到一个 S3 存储桶** - -在隔离场景下,您需要手动将文件下载并上传到 DTH 所在区域的 S3 存储桶中。 - -1. 下载 [Amazon CloudWatch 代理](https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/arm64/latest/amazon-cloudwatch-agent.rpm) 和 [DTH CLI](https://aws-gcr-solutions-assets.s3.amazonaws.com/data-transfer-hub-cli/v1.2.1/dthcli_1.2.1_linux_arm64.tar.gz)。 - -2. 创建DTH Worker的 CloudWatch 代理配置文件。您可以创建一个名为 `cw_agent_config.json` 的文件。 -```json -{ - "agent": { - "metrics_collection_interval": 60, - "run_as_user": "root" - }, - "logs": { - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/home/ec2-user/worker.log", - "log_group_name": "##log group##", - "log_stream_name": "Instance-{instance_id}" - } - ] - } - } - }, - "metrics": { - "append_dimensions": { - "AutoScalingGroupName": "${aws:AutoScalingGroupName}", - "InstanceId": "${aws:InstanceId}" - }, - "aggregation_dimensions": [ - [ - "AutoScalingGroupName" - ] - ], - "metrics_collected": { - "disk": { - "measurement": [ - "used_percent" - ], - "metrics_collection_interval": 60, - "resources": [ - "*" - ] - }, - "mem": { - "measurement": [ - "mem_used_percent" - ], - "metrics_collection_interval": 60 - } - } - } -} -``` -3. 将这三个文件上传到部署 DTH 的区域中的一个 S3 存储桶。 - -![assets](./images/dx-s3-assets.png) - -### 部署 DTH S3-Plugin - -我们建议使用 **DTH S3-plugin** 创建传输任务,而不是使用 DTH 控制台。 - -**从全球区域启动堆栈** - -[![Launch Stack](./images/launch-stack.png)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) - -**从中国区域启动堆栈** - -[![Launch Stack](./images/launch-stack.png)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHS3Stack&templateURL=https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template) - - -1. 为 **Source Type** 选择 **Amazon_S3**。 - -2. 输入 **Source Bucket**。 - -3. 如果需要的话,输入 **Source Prefix**。 - -4. 输入 **Source Endpoint URL**, 例如 `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`。 - -5. 为 **Source In Current Account** 选择 **false**。 - -6. 在 **Source Credentials** 中输入[Secrets Manager](https://console.aws.amazon.com/secretsmanager/home) 中存储的密钥名称。 - -7. 为 **Enable S3 Event** 选择 **No**。 - -8. 配置**Destination Bucket**、**Destination Prefix**、**Destination Region**和**Destination in Current Account**。 如果目标存储桶在当前账户中,请将目标凭证留空。 - -9. 配置**告警邮件**。 - -10. 配置 **VPC ID** 和 **Subnet IDs**。 - -11. 其他参数保持默认,点击**下一步**。 - -12. 
单击**下一步**。配置其他堆栈选项,例如标签(可选)。 - -13. 单击**下一步**。查看并勾选确认,然后单击**创建堆栈**开始部署。 - -部署预计用时3到5分钟。 - -### 为 Worker 节点和 Finder 节点更新 EC2 Userdata - -**更新 Worker 节点的Userdata** - -1. 前往Auto Scaling Group的 [Launch configurations](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchConfigurations:launchConfigurationName=)。 - -2. 选择所要更改的Config并点击 **Copy Launch Configuration**。 - -3. 编辑**Advanced details** 部分下的 **User data** 。 - - 使用下面的 shell 脚本替换 `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh`之前的代码。 - - - ```shell - #!/bin/bash - - yum update -y - cd /home/ec2-user/ - asset_bucket= - aws s3 cp "s3://$asset_bucket/cw_agent_config.json" . --region - aws s3 cp "s3://$asset_bucket/amazon-cloudwatch-agent.rpm" . --region - aws s3 cp "s3://$asset_bucket/dthcli_1.2.1_linux_arm64.tar.gz" . --region - - sudo yum install -y amazon-cloudwatch-agent.rpm - sed -i -e "s/##log group##//g" cw_agent_config.json - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s - tar zxvf dthcli_1.2.1_linux_arm64.tar.gz - - ``` - - - 请将 `` 替换为您存储前述文件的特定存储桶名称。 - - - 将 `` 替换为部署 DTH S3-Plugin 解决方案的区域。 - - - 将 `` 替换为 DTH Worker 的日志组名称。 - - - 请不要编辑 `// Prepare the environment variables` 之后的代码。 - -4. 点击 **Create Launch Configuration**。 - -5. 前往 [Auto Scling Group](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#AutoScalingGroups:), 选择目标Auto Scaling Group 并点击 **Edit**。 - -6. 在 **Launch configuration** 部分, 选择在前述步骤汇总新建的launch configuration。 点击 **Update**。 - -7. 终止所有正在运行的 DTH 工作程序节点,Auto Scaling Group将使用新的用户数据启动新的Worker节点。 - -**更新 Finder 节点的Userdata** - -1. 前往EC2 [Launch Templates](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchTemplates:)。 - -2. 点击 **Modify template**。 - -3. 在**Advanced details**部分,编辑**User data**. - - 使用下面的 shell 脚本替换 `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh`之前的代码。 - - ```shell - #!/bin/bash - - yum update -y - cd /home/ec2-user/ - asset_bucket= - aws s3 cp "s3://$asset_bucket/amazon-cloudwatch-agent.rpm" . --region - aws s3 cp "s3://$asset_bucket/dthcli_1.2.1_linux_arm64.tar.gz" . --region - - echo "{\"agent\": {\"metrics_collection_interval\": 60,\"run_as_user\": \"root\"},\"logs\": {\"logs_collected\": {\"files\": {\"collect_list\": [{\"file_path\": \"/home/ec2-user/finder.log\",\"log_group_name\": \"##log group##\"}]}}}}" >> /home/ec2-user/cw_agent_config.json, - - sudo yum install -y amazon-cloudwatch-agent.rpm - - sed -i -e "s/##log group##/`/g" cw_agent_config.json - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/home/ec2-user/cw_agent_config.json -s - tar zxvf dthcli_1.2.1_linux_arm64.tar.gz - - ``` - - - 请将 `` 替换为您存储前述文件的特定存储桶名称。 - - - 将 `` 替换为部署 DTH S3-Plugin 解决方案的区域。 - - - 将 `` 替换为 DTH Finder 的日志组名称。 - - - 请不要编辑 `echo "export JOB_TABLE_NAME=xxxxxxxxxxx" >> env.sh`后面的代码。 - -4. 点击 **Create template version**。 - -5. 
使用这个新版本模板启动一个新的 Finder 节点,并手动终止旧的Finder节点。 - - -## 架构图 - -[![architecture]][architecture] - -[architecture]: ./images/dx-arch-global.png - -在 EC2 上运行的 DTH Worker节点将数据从一个 AWS 账户中的存储桶传输到另一个 AWS 账户中的存储桶。 - -* 要访问当前账户中的存储桶(DTH 所部署侧),DTH Worker节点使用**S3 Gateway Endpoint** -* 要访问另一个账户中的存储桶,DTH Worker节点使用 **S3 Private Link** by **S3 Interface Endpoint** - diff --git a/docs/zh-base/upgrade.md b/docs/zh-base/update.md similarity index 95% rename from docs/zh-base/upgrade.md rename to docs/zh-base/update.md index a49a383..5009a32 100644 --- a/docs/zh-base/upgrade.md +++ b/docs/zh-base/update.md @@ -7,7 +7,7 @@ * [步骤 1. 更新 CloudFormation 堆栈](#1) * [步骤 2. (可选)更新 OIDC 配置](#oidc-update) -* [步骤 3. 在 CloudFront 创建失效](#3-cloudfront) +* [步骤 3. 在 CloudFront 创建失效](#cloudfront) * [步骤 4. 刷新网页控制台](#4) ## 步骤 1. 更新 CloudFormation 堆栈 @@ -40,7 +40,7 @@ 如果您已经在中国区结合 OIDC 部署了该方案,请参考[部署](../deployment/#1oidc)章节更新 OIDC 中的授权、认证配置。 -## 步骤 3. 在 CloudFront 创建 CDN 刷新 +## 步骤 3. 在 CloudFront 创建 CDN 刷新 CloudFront 已在其边缘节点缓存旧版本的 Data Transfer Hub 控制台。 我们需要在 CloudFront 控制台上创建一个失效(invalidation)以强制删除缓存。 diff --git a/docs/zh-base/tutorial-cli-launch.md b/docs/zh-base/user-guide/tutorial-cli-launch.md similarity index 90% rename from docs/zh-base/tutorial-cli-launch.md rename to docs/zh-base/user-guide/tutorial-cli-launch.md index cfe4a20..2f59d4d 100644 --- a/docs/zh-base/tutorial-cli-launch.md +++ b/docs/zh-base/user-guide/tutorial-cli-launch.md @@ -2,24 +2,15 @@ 1. 创建一个具有两个公有子网或两个拥有[NAT 网关][nat] 私有子网的Amazon VPC。 -2. 根据需要替换``。 +2. 根据需要替换``为`https://s3.amazonaws.com/solutions-reference/data-transfer-hub/latest/DataTransferS3Stack-ec2.template`。 - - 全球区域: - ``` - https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template - ``` - - 中国区域: - ``` - https://s3.amazonaws.com/solutions-reference/data-transfer-hub-s3-plugin/latest/DataTransferS3Stack-ec2.template - ``` - -3. 转到您的终端并输入以下命令。参数详情请参考下表。 +3. 转到您的终端并输入以下命令。详情请参考**参数列表**。 ```shell aws cloudformation create-stack --stack-name dth-s3-task --template-url CLOUDFORMATION_URL \ --capabilities CAPABILITY_NAMED_IAM \ --parameters \ - ParameterKey=alarmEmail,ParameterValue=your_email@abc.com \ + ParameterKey=alarmEmail,ParameterValue=your_email@example.com \ ParameterKey=destBucket,ParameterValue=dth-recive-cn-north-1 \ ParameterKey=destPrefix,ParameterValue=test-prefix \ ParameterKey=destCredentials,ParameterValue=drh-cn-secret-key \ @@ -57,6 +48,7 @@ | destPrefix | | | 目标前缀(可选) | destRegion | | | 目标区域名称 | destStorageClass | STANDARD
STANDARD_IA
ONEZONE_IA
INTELLIGENT_TIERING | INTELLIGENT_TIERING | 目标存储类。默认值为INTELLIGENT_TIERING +| isPayerRequest | true
false | false | 是否启用请求者付费模式 | | ec2CronExpression | | 0/60 * * * ? * | EC2 Finder 任务的 Cron 表达式。
"" 表示一次性转移。| | finderEc2Memory | 8
16
32
64
128
256 | 8 GB| Finder 任务使用的内存量(以 GB 为单位) | ec2Subnets | | | 两个公共子网或具有 [NAT 网关][nat] 的两个私有子网 | diff --git a/docs/zh-base/user-guide/tutorial-directconnect.md b/docs/zh-base/user-guide/tutorial-directconnect.md new file mode 100644 index 0000000..8cd7a74 --- /dev/null +++ b/docs/zh-base/user-guide/tutorial-directconnect.md @@ -0,0 +1,58 @@ +本教程介绍如何通过 Direct Connect (DX) 运行 Data Transfer Hub (DTH)。 + +DTH Worker Node 和 Finder Node 启动时,默认需要从 Internet 下载相关文件(如 CloudWatch 代理、DTH CLI)。在隔离场景下,您需要手动将文件下载并上传到 DTH 所在区域的 S3 存储桶中。 + +使用 DTH 通过 DX 传输数据有两种方式: + +- [在非隔离网络环境下通过Direct Connect使用DTH进行数据传输](#default-network) +- [在隔离网络环境下通过Direct Connect使用DTH进行数据传输](#isolated-network) + +## 在非隔离网络环境下通过Direct Connect使用DTH进行数据传输 + +在这种情况下,DTH 部署在**目标端**,并在一个具有**公共访问权限**(具有 Internet 网关或 NAT)的 VPC 内。数据源桶在隔离的网络环境中。 + +!!! note "说明" + + 由于 DTH 部署 VPC 具有公共 Internet 访问权限(IGW 或 NAT),EC2 Worker/Finder器节点可以访问 DTH 使用的其他 AWS 服务,例如Secrets Manager等,并从 Internet 下载相关资源(例如 CloudWatch 代理、DTH CLI),从而无需任何其他手动操作。 + +1. 从**创建传输任务**页面,选择**创建新任务**,然后选择**下一步**。 +2. 在**引擎选项**页面的引擎下,选择**Amazon S3**,然后选择**下一步**。 +3. 指定传输任务详细信息。 + - 在**源类型**下,选择**Amazon S3 Compatible Storage**。 + +4. 输入 **endpoint url**, 该参数必须填写接口端点url,如 `https://bucket.vpce-076205013d3a9a2ca-us23z2ze.s3.ap-east-1.vpce.amazonaws.com`。您可以在[VPC 终端节点 控制台](https://us-east-1.console.aws.amazon.com/vpc/home?region=us-west-2#Endpoints:vpcEndpointType=Interface) 的 DNS 名称部分找到对应的url. + +5. 输入**存储桶名称**,并选择同步**整个存储桶**或**指定前缀的对象**或**多个指定前缀的对象**。 + +6. 设置目标端S3存储桶信息。 + +7. 在**引擎设置**中,验证信息,并在必要时修改信息。如果要进行增量数据传输,建议将**最小容量**设置为至少为1的值。 + +8. 在**任务调度设置**处,选择您的任务调度配置。 + - 如果要以固定频率配置定时任务,以实现定时对比两侧的数据差异,请选择**Fixed Rate**。 + - 如果要通过[Cron Expression](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions)配置定时任务,以实现定时对比两侧的数据差异,请选择**Cron Expression**。 + - 如果只想执行一次数据同步任务,请选择**One Time Transfer**。 + +9. 在**高级选项**中,保留默认值。 +10. 在**是否需要数据比对**处,选择您的任务配置。 + - 如果要跳过数据对比过程,传输所有文件,请选择**No**。 + - 如果只想同步有差异的文件,请选择**Yes**。 + +11. 在**通知邮箱**中提供电子邮件地址。 + +12. 选择**下一步**并查看您的任务参数详细信息。 + +13. 选择**创建任务**。 + +## 在隔离网络环境下通过Direct Connect使用DTH进行数据传输 +在这种情况下,DTH 部署在**目标侧**,并且在一个**没有公共访问权限**VPC内(隔离 VPC)。同时数据源桶也在一个隔离的网络环境中。详情请参考[教程][https://github.com/awslabs/data-transfer-hub/blob/main/docs/tutorial-directconnect-isolated.md]。 + +[![architecture]][architecture] + +[architecture]: ../images/dx-arch-global.png + +在 EC2 上运行的 DTH Worker节点将数据从一个 AWS 账户中的存储桶传输到另一个 AWS 账户中的存储桶。 + +* 要访问当前账户中的存储桶(DTH 所部署侧),DTH Worker节点使用**S3 Gateway Endpoint** +* 要访问另一个账户中的存储桶,DTH Worker节点使用 **S3 Private Link** by **S3 Interface Endpoint** + diff --git a/docs/zh-base/tutorial-ecr.md b/docs/zh-base/user-guide/tutorial-ecr.md similarity index 95% rename from docs/zh-base/tutorial-ecr.md rename to docs/zh-base/user-guide/tutorial-ecr.md index f173c4f..548397c 100644 --- a/docs/zh-base/tutorial-ecr.md +++ b/docs/zh-base/user-guide/tutorial-ecr.md @@ -1,4 +1,4 @@ -您可以在网页控制台创建Amazon ECR数据传输任务。更多信息请参考[部署解决方案](./deployment.md)。 +您可以在网页控制台创建Amazon ECR数据传输任务。更多信息请参考[部署解决方案](../../deployment/deployment-overview)。 1. 从**创建传输任务**页面,选择**创建新任务**,然后选择**下一步**。 2. 
在**引擎选项**页面的引擎下,选择**Amazon ECR**,然后选择**下一步**。您还可以通过选择 **Public Container Registry** 从 Docker Hub,GCR.io,Quay.io 等复制镜像。 diff --git a/docs/zh-base/tutorial-oss.md b/docs/zh-base/user-guide/tutorial-oss.md similarity index 90% rename from docs/zh-base/tutorial-oss.md rename to docs/zh-base/user-guide/tutorial-oss.md index 0c22866..eb004f8 100644 --- a/docs/zh-base/tutorial-oss.md +++ b/docs/zh-base/user-guide/tutorial-oss.md @@ -1,7 +1,7 @@ 本教程介绍如何将 **阿里云 OSS** 中的数据传输到 **Amazon S3**。 ## 前提条件 -您已经完成了Data Transfer Hub解决方案的部署,并将解决方案部署在 **俄勒冈州(us-west-2)** 区域。更多信息请参考[部署解决方案](./deployment.md)。 +您已经完成了Data Transfer Hub解决方案的部署,并将解决方案部署在 **俄勒冈州(us-west-2)** 区域。更多信息请参考[部署解决方案](../../deployment/deployment-overview)。 ## 步骤1: 为OSS配置凭证 @@ -57,7 +57,7 @@ 任务创建成功后,会出现在**任务**页面。 -![s3-task-list](./images/s3-task-list-oss.png) +![s3-task-list](../images/s3-task-list-oss.png) 图2:任务页面 @@ -71,9 +71,11 @@ **准备您的AWS账户的AK/SK** -1. 前往 [IAM 控制台](https://console.aws.amazon.com/iam/home?region=us-west-2), 点击 **创建一个新的策略(Create Policy)**。 +1. 前往 [IAM 控制台](https://console.aws.amazon.com/iam/home?region=us-west-2)。 -2. 点击 **JSON**,并将下面的权限Json文件输入到策略中。 +2. 在导航窗格中选择**策略**,然后选择**创建一个新的策略(Create Policy)**。 + +3. 点击 **JSON**,并将下面的权限JSON文件输入到策略中。 ```json { @@ -82,7 +84,6 @@ { "Effect": "Allow", "Action": [ - "sqs:SendMessageBatch", "sqs:SendMessage" ], "Resource": "arn:aws:sqs:us-west-2:xxxxxxxxxxx:DTHS3Stack-S3TransferQueue-1TSF4ESFQEFKJ" @@ -91,13 +92,15 @@ } ``` !!! Note "说明" - 请替换JSON中您的Queue ARN。 + 请替换JSON中您的queue ARN。 + +4. 完成创建策略。 -3. 创建一个User。前往 [User 控制台](https://console.aws.amazon.com/iam/home?region=us-west-2#/users) 然后点击 **添加用户(Add User)**。 +5. 在导航窗格中,选择**用户**,然后选择**添加用户(Add User)**。 -4. 然后将您先前创建的策略关联到该用户上。 +6. 将您先前创建的策略关联到该用户上。 -5. 保存 **ACCESS_KEY/SECRET_KEY**,以备后面的步骤使用。 +7. 保存 **ACCESS_KEY/SECRET_KEY**,以备后面的步骤使用。 **准备阿里云中的事件发送函数** @@ -196,7 +199,7 @@ 1. 在**触发器**页签中点击**创建触发器**以创建函数的触发器。 - ![portal](images/aliyun_create_trigger.png) + ![portal](../images/aliyun_create_trigger.png) 2. 选择**OSS**作为触发器类型,然后选择桶名称。 diff --git a/docs/zh-base/tutorial-s3.md b/docs/zh-base/user-guide/tutorial-s3.md similarity index 98% rename from docs/zh-base/tutorial-s3.md rename to docs/zh-base/user-guide/tutorial-s3.md index 137827e..243011f 100644 --- a/docs/zh-base/tutorial-s3.md +++ b/docs/zh-base/user-guide/tutorial-s3.md @@ -1,4 +1,4 @@ -您可以在网页控制台创建Amazon S3数据传输任务。更多信息请参考[部署解决方案](./deployment.md)。 +您可以在网页控制台创建Amazon S3数据传输任务。更多信息请参考[部署解决方案](../../deployment/deployment-overview)。 !!! Note "注意" Data Transfer Hub 也支持通过 AWS CLI 创建 Amazon S3 的传输任务, 请参考该[教程](./tutorial-cli-launch.md). @@ -62,7 +62,7 @@ Data Transfer Hub 默认支持使用 SSE-S3 和 SSE-KMS 的数据源。 注意进行以下修改: -- 请将 kms 部分中的 `Resource` 更改为您自己的 KMS 密钥的 arn。 +- 请将 kms 部分中的 `Resource` 更改为您自己的 KMS 密钥的 ARN。 - 如果是针对中国地区的 S3 存储桶,请确保更改为使用 `arn:aws-cn:kms:::` 而不是 `arn:aws:kms:::`。 diff --git a/docs/zh-rebrand/faq.md b/docs/zh-rebrand/faq.md index 3c8bc60..3b383ee 100644 --- a/docs/zh-rebrand/faq.md +++ b/docs/zh-rebrand/faq.md @@ -131,6 +131,6 @@ **6. 部署完成后,为什么在两个CloudWatch日志组中找不到任何日志流?**
-这是因为您在部署此解决方案时选择的子网没有公共网络访问权限,因此Fargate任务无法拉取映像,并且EC2无法下载CloudWatch 代理将日志发送到CloudWatch。请检查您的VPC设置。解决问题后,您需要通过此解决方案手动终止正在运行的EC2实例(如果有的话)。随后,弹性伸缩组会自动启动新的实例。 +这是因为您在部署此解决方案时选择的子网没有公共网络访问权限,因此Fargate任务无法拉取镜像,并且EC2无法下载CloudWatch 代理将日志发送到CloudWatch。请检查您的VPC设置。解决问题后,您需要通过此解决方案手动终止正在运行的EC2实例(如果有的话)。随后,弹性伸缩组会自动启动新的实例。 [crr]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html#crr-scenario \ No newline at end of file diff --git a/source/constructs/lambda/api/cwl/lambda_function.py b/source/constructs/lambda/api/cwl/lambda_function.py index 65a5e5b..ce33794 100644 --- a/source/constructs/lambda/api/cwl/lambda_function.py +++ b/source/constructs/lambda/api/cwl/lambda_function.py @@ -41,7 +41,7 @@ def wrapper(*args, **kwargs): @handle_error -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) action = event["info"]["fieldName"] diff --git a/source/constructs/lambda/api/secrets_manager/api_sm_param.py b/source/constructs/lambda/api/secrets_manager/api_sm_param.py index a0ae1c4..a76fbff 100644 --- a/source/constructs/lambda/api/secrets_manager/api_sm_param.py +++ b/source/constructs/lambda/api/secrets_manager/api_sm_param.py @@ -9,7 +9,7 @@ client = boto3.client('secretsmanager') -def lambda_handler(event, context): +def lambda_handler(event, _): result = [] response = client.list_secrets(SortOrder='asc') diff --git a/source/constructs/lambda/api/task-monitoring/change_asg_size.py b/source/constructs/lambda/api/task-monitoring/change_asg_size.py index a13fd0a..e1c3797 100644 --- a/source/constructs/lambda/api/task-monitoring/change_asg_size.py +++ b/source/constructs/lambda/api/task-monitoring/change_asg_size.py @@ -34,7 +34,7 @@ def wrapper(*args, **kwargs): @handle_error -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/check_finder_job_status.py b/source/constructs/lambda/api/task-monitoring/check_finder_job_status.py index ed033b6..6944f96 100644 --- a/source/constructs/lambda/api/task-monitoring/check_finder_job_status.py +++ b/source/constructs/lambda/api/task-monitoring/check_finder_job_status.py @@ -19,7 +19,7 @@ default_config = config.Config(**user_agent_config) -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/check_is_onetime_transfer_task.py b/source/constructs/lambda/api/task-monitoring/check_is_onetime_transfer_task.py index a04b2c7..365115a 100644 --- a/source/constructs/lambda/api/task-monitoring/check_is_onetime_transfer_task.py +++ b/source/constructs/lambda/api/task-monitoring/check_is_onetime_transfer_task.py @@ -19,7 +19,7 @@ default_config = config.Config(**user_agent_config) -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/check_sqs_status.py b/source/constructs/lambda/api/task-monitoring/check_sqs_status.py index 9e657df..52b287f 100644 --- a/source/constructs/lambda/api/task-monitoring/check_sqs_status.py +++ b/source/constructs/lambda/api/task-monitoring/check_sqs_status.py @@ -19,7 +19,7 @@ default_config = config.Config(**user_agent_config) -def lambda_handler(event, context): +def 
lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/check_transfer_complete.py b/source/constructs/lambda/api/task-monitoring/check_transfer_complete.py index a351a26..c85c9b8 100644 --- a/source/constructs/lambda/api/task-monitoring/check_transfer_complete.py +++ b/source/constructs/lambda/api/task-monitoring/check_transfer_complete.py @@ -19,7 +19,7 @@ default_config = config.Config(**user_agent_config) -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/send_sns_notification.py b/source/constructs/lambda/api/task-monitoring/send_sns_notification.py index b30ec2f..acdb14c 100644 --- a/source/constructs/lambda/api/task-monitoring/send_sns_notification.py +++ b/source/constructs/lambda/api/task-monitoring/send_sns_notification.py @@ -8,7 +8,7 @@ logger.setLevel(logging.INFO) -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) args = event["arguments"] diff --git a/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py b/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py index cde32ef..226f3a4 100644 --- a/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py +++ b/source/constructs/lambda/api/task-monitoring/start_monitor_flow.py @@ -23,7 +23,7 @@ monitor_sfn_arn = os.environ.get("MONITOR_SFN_ARN") -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) task_id = event.get("id") @@ -34,7 +34,7 @@ def lambda_handler(event, context): } } - response = stepfunctions_client.start_execution( + stepfunctions_client.start_execution( stateMachineArn=monitor_sfn_arn, input=json.dumps(input_data), ) diff --git a/source/constructs/lambda/api/task-monitoring/util/__pycache__/monitor_helper.cpython-310.pyc b/source/constructs/lambda/api/task-monitoring/util/__pycache__/monitor_helper.cpython-310.pyc deleted file mode 100644 index 5bbf80c..0000000 Binary files a/source/constructs/lambda/api/task-monitoring/util/__pycache__/monitor_helper.cpython-310.pyc and /dev/null differ diff --git a/source/constructs/lambda/api/task/api_task_v2.py b/source/constructs/lambda/api/task/api_task_v2.py index 97904f2..2849e34 100644 --- a/source/constructs/lambda/api/task/api_task_v2.py +++ b/source/constructs/lambda/api/task/api_task_v2.py @@ -22,7 +22,7 @@ transfer_task_table = dynamodb.Table(transfer_task_table_name) -def lambda_handler(event, context): +def lambda_handler(event, _): # logger.info("Received event: " + json.dumps(event, indent=2)) action = event['info']['fieldName'] diff --git a/source/constructs/lambda/api/task/util/__pycache__/task_helper.cpython-310.pyc b/source/constructs/lambda/api/task/util/__pycache__/task_helper.cpython-310.pyc deleted file mode 100644 index 8d82972..0000000 Binary files a/source/constructs/lambda/api/task/util/__pycache__/task_helper.cpython-310.pyc and /dev/null differ diff --git a/source/constructs/lambda/cdk/cfn-task.ts b/source/constructs/lambda/cdk/cfn-task.ts index bad23bc..eb54cf7 100644 --- a/source/constructs/lambda/cdk/cfn-task.ts +++ b/source/constructs/lambda/cdk/cfn-task.ts @@ -169,7 +169,6 @@ exports.queryTaskCfn = async function (input: QueryCfnTaskInput) { ReturnValues: "ALL_NEW" 
}).promise() - // TODO: if failed, update the reason. pprint('updatedTask.Attributes', updatedTask.Attributes) return updatedTask.Attributes } else { diff --git a/source/constructs/lambda/custom-resource/create_service_linked_role.py b/source/constructs/lambda/custom-resource/create_service_linked_role.py index 95ea012..31ab593 100644 --- a/source/constructs/lambda/custom-resource/create_service_linked_role.py +++ b/source/constructs/lambda/custom-resource/create_service_linked_role.py @@ -11,16 +11,16 @@ logger = logging.getLogger() logger.setLevel(logging.INFO) -def lambda_handler(event, context): +def lambda_handler(event, _): request_type = event['RequestType'] if request_type == 'Create' or request_type == 'Update': try: - response = iam.get_role( + iam.get_role( RoleName='AWSServiceRoleForAppSync', ) - except Exception as err: + except Exception: logger.info("AWSServiceRoleForAppSync does not exist, create it.") - response = iam.create_service_linked_role( + iam.create_service_linked_role( AWSServiceName='appsync.amazonaws.com' ) time.sleep(5) diff --git a/source/constructs/lambda/layer/api/nodejs/package.json b/source/constructs/lambda/layer/api/nodejs/package.json index 0bab53f..dfaa7cd 100644 --- a/source/constructs/lambda/layer/api/nodejs/package.json +++ b/source/constructs/lambda/layer/api/nodejs/package.json @@ -1,6 +1,11 @@ { "name": "src", - "version": "1.0.0", + "version": "2.4.0", + "license": "Apache-2.0", + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com/solutions" + }, "dependencies": { "uuid": "^8.3.0", "aws-lambda": "^1.0.6", diff --git a/source/constructs/lambda/layer/cdk/nodejs/package.json b/source/constructs/lambda/layer/cdk/nodejs/package.json index 0bab53f..dfaa7cd 100644 --- a/source/constructs/lambda/layer/cdk/nodejs/package.json +++ b/source/constructs/lambda/layer/cdk/nodejs/package.json @@ -1,6 +1,11 @@ { "name": "src", - "version": "1.0.0", + "version": "2.4.0", + "license": "Apache-2.0", + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com/solutions" + }, "dependencies": { "uuid": "^8.3.0", "aws-lambda": "^1.0.6", diff --git a/source/constructs/lib/api-stack.ts b/source/constructs/lib/api-stack.ts index 4616071..1298807 100644 --- a/source/constructs/lib/api-stack.ts +++ b/source/constructs/lib/api-stack.ts @@ -24,12 +24,10 @@ import { import * as appsync from "@aws-cdk/aws-appsync-alpha"; -// import { Construct, CfnParameter, Duration, Stack, RemovalPolicy, CustomResource, Aws, CfnOutput } from '@aws-cdk/core'; import * as path from 'path'; import * as cfnSate from './cfn-step-functions'; import * as monitorSate from './monitor-step-functions' - import { AuthType } from './constructs-stack'; - import { addCfnNagSuppressRules } from "./constructs-stack"; + import { AuthType, addCfnNagSuppressRules } from './constructs-stack'; export interface ApiProps { @@ -53,11 +51,11 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; // Can define custom bucket to hold the plugin url. 
Default to aws-gcr-solutions const templateBucket = process.env.TEMPLATE_OUTPUT_BUCKET || 'aws-gcr-solutions' - let s3PluginVersion = 'v2.3.0' + let s3PluginVersion = 'v2.4.0' let ecrPluginVersion = 'v2.3.0' let suffix = '-plugin' if (templateBucket === 'aws-gcr-solutions') { - s3PluginVersion = 'v2.3.0' + s3PluginVersion = 'v2.4.0' ecrPluginVersion = 'v2.3.0' suffix = '' } @@ -330,9 +328,6 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; }) // Create an Admin User - // TODO: The user can be created, however, the state is FORCE_PASSWORD_CHANGE, the customer still cannot use the account yet. - // https://stackoverflow.com/questions/40287012/how-to-change-user-status-force-change-password - // resolution: create a custom lambda to set user password new cognito.CfnUserPoolUser(this, 'AdminUser', { userPoolId: this.userPool.userPoolId, username: props?.usernameParameter?.valueAsString, @@ -351,11 +346,6 @@ import * as appsync from "@aws-cdk/aws-appsync-alpha"; } }) - // const userPoolDomainOutput = new cdk.CfnOutput(this, 'UserPoolDomainOutput', { - // exportName: 'UserPoolDomain', - // value: `https://${userPoolDomain.domainName}.auth.${this.region}.amazoncognito.com`, - // description: 'Cognito Hosted UI domain name' - // }) this.authDefaultConfig = { authorizationType: appsync.AuthorizationType.USER_POOL, userPoolConfig: { diff --git a/source/constructs/lib/portal-stack.ts b/source/constructs/lib/portal-stack.ts index cd4a419..5f7cc29 100644 --- a/source/constructs/lib/portal-stack.ts +++ b/source/constructs/lib/portal-stack.ts @@ -20,8 +20,7 @@ import { import { CloudFrontToS3 } from '@aws-solutions-constructs/aws-cloudfront-s3'; import * as path from 'path' -import { AuthType } from './constructs-stack'; -import { addCfnNagSuppressRules } from "./constructs-stack"; +import { AuthType, addCfnNagSuppressRules } from './constructs-stack'; import { NagSuppressions } from 'cdk-nag'; // const { BUCKET_NAME, SOLUTION_NAME, VERSION } = process.env @@ -276,7 +275,6 @@ function handler(event) { serviceToken: customResourceFunction.functionArn }); customResource.addOverride('Type', 'Custom::CustomResource'); - // customResource.overrideLogicalId(id); if (config) { const { properties, condition, dependencies } = config; diff --git a/source/constructs/package.json b/source/constructs/package.json index 6b4252e..490e0fc 100755 --- a/source/constructs/package.json +++ b/source/constructs/package.json @@ -1,6 +1,6 @@ { "name": "data-transfer-hub", - "version": "2.3.0", + "version": "2.4.0", "license": "Apache-2.0", "author": { "name": "Amazon Web Services", @@ -24,7 +24,7 @@ "@types/uuid": "^8.3.1", "aws-cdk": "2.74.0", "aws-cdk-lib": "2.74.0", - "aws-sdk": "2.814.0", + "aws-sdk": "2.1360.0", "aws-sdk-mock": "^5.4.0", "jest": "^29.4.3", "ts-jest": "^29.0.5", diff --git a/source/custom-resource/package.json b/source/custom-resource/package.json index c8bb4e7..d495e64 100644 --- a/source/custom-resource/package.json +++ b/source/custom-resource/package.json @@ -1,11 +1,14 @@ { "name": "ui-framework-helper", + "description": "UI Framework custom resource helper Lambda function", "main": "index.js", + "version": "2.4.0", + "license": "Apache-2.0", "author": { - "name": "aws-solutions-builder" + "name": "Amazon Web Services", + "url": "https://aws.amazon.com/solutions" }, - "version": "5.0.0", "private": true, "dependencies": { "moment": "^2.24.0", @@ -32,6 +35,5 @@ "bundledDependencies": [ "moment", "uuid" - ], - "license": "Apache-2.0" + ] } diff --git a/source/portal/package.json 
b/source/portal/package.json index a9cc928..7cf5e0c 100644 --- a/source/portal/package.json +++ b/source/portal/package.json @@ -1,6 +1,6 @@ { "name": "data-transfer-hub-portal", - "version": "0.1.0", + "version": "2.4.0", "license": "Apache-2.0", "author": { "name": "Amazon Web Services", @@ -33,11 +33,11 @@ "classnames": "^2.3.2", "date-fns": "^2.29.3", "i18next": "^20.2.1", - "moment": "^2.29.1", "i18next-browser-languagedetector": "^6.1.4", "i18next-http-backend": "^1.4.1", "lodash.clonedeep": "^4.5.0", - "node-sass": "^7.0.1", + "moment": "^2.29.1", + "node-sass": "^8.0.0", "oidc-client": "^1.11.5", "oidc-client-ts": "^2.2.0", "react": "^17.0.2", @@ -50,10 +50,9 @@ "react-number-format": "^4.9.3", "react-oidc-context": "^2.2.0", "react-router-dom": "^5.3.0", - "react-scripts": "5.0.1", "redux": "^4.2.0", "redux-react-hook": "^4.0.3", - "sweetalert2": "10.16.11", + "sweetalert2": "11.4.8", "typescript": "^4.9.3" }, "scripts": { @@ -93,6 +92,10 @@ "eslint-plugin-promise": "^6.1.1", "eslint-plugin-react": "^7.31.11", "prettier": "^2.2.1", + "react-scripts": "^5.0.1", "rimraf": "^3.0.2" + }, + "overrides": { + "nth-check": "2.1.1" } } diff --git a/source/portal/public/locales/en/translation.json b/source/portal/public/locales/en/translation.json index a9a8bcb..4898687 100644 --- a/source/portal/public/locales/en/translation.json +++ b/source/portal/public/locales/en/translation.json @@ -8,6 +8,7 @@ "info": "Info", "optional": "optional", "recommened": "Recommened", + "credentialsGuide": "Create secret guide & AK/SK format", "signin": { "signInToDRH": "Sign in to Data Transfer Hub", "email": "Email *", @@ -88,7 +89,7 @@ "requiredCredential": "Please select a Secret Key in Secrets Manager", "store1": "Select a secret created by", "store2": "Secrets Manager.", - "store3": "Leave it bland for public bucket." + "store3": "Leave it blank for public bucket." }, "step1": { "engineType": "Select engine type", @@ -125,7 +126,10 @@ "objectACLDesc3": " Use the default value if you don't want to set the object ACL.", "transferType": "Transfer Type", "nameOfPrefixList": "Name of Prefix List", - "nameOfPrefixListDesc": "Upload the prefix list (.txt file, one prefix per line) in root directory of source bucket. E.g. prefix_list.txt" + "nameOfPrefixListDesc": "Upload the prefix list (.txt file, one prefix per line) in root directory of source bucket. E.g. prefix_list.txt", + "govCloudTips": "GovCloud data transfer limited in US", + "payerRequest": "Enable S3 Payer Request?", + "payerRequestDesc": "S3 Payer Request required the Requestor pay DTO fee" }, "dest": { "title": "Destination settings", @@ -193,6 +197,11 @@ "taskDetail": "Specify task details", "sourceType": "Source Type", "selectContainerType": "Select container registry type", + "valid": { + "srcRegionRequired": "Source Region is Required", + "accountRequired": "Please input Amazon Web Services account ID", + "accountFormatInvalid": "Account ID is invalid" + }, "settings": { "source": { "title": "Source settings", @@ -391,7 +400,7 @@ "create": "Create a", "store": "Secret Key", "save1": "in Secrets Manager. 
Select ", - "save2": " Other type of secrets ", + "save2": " Other type of secret", "save3": " as the type.", "format": "Format", "formatDesc": "Copy the following credential, and replace with your own value in the plaintext tab.", diff --git a/source/portal/public/locales/zh/translation.json b/source/portal/public/locales/zh/translation.json index 3c1f3f4..5d98f29 100644 --- a/source/portal/public/locales/zh/translation.json +++ b/source/portal/public/locales/zh/translation.json @@ -8,6 +8,7 @@ "info": "信息", "optional": "可选", "recommened": "推荐", + "credentialsGuide": "创建密钥指南和 AK/SK 格式", "signin": { "signInToDRH": "登录到 Data Transfer Hub", "email": "邮箱地址 *", @@ -125,7 +126,10 @@ "objectACLDesc3": " 如果您不想设置对象ACL,请使用默认值.", "transferType": "传输类型", "nameOfPrefixList": "前缀列表文件名", - "nameOfPrefixListDesc": "请将前缀文件以.txt的格式上传到源存储桶根目录,每行写一个前缀。例如: prefix_list.txt" + "nameOfPrefixListDesc": "请将前缀文件以.txt的格式上传到源存储桶根目录,每行写一个前缀。例如: prefix_list.txt", + "govCloudTips": "GovCloud 数据传输仅限在美国", + "payerRequest": "使用申请方付款?", + "payerRequestDesc": "使用申请方付款存储桶时,申请方将支付请求和从存储桶下载数据的费用" }, "dest": { "title": "目标桶设置", @@ -193,6 +197,11 @@ "taskDetail": "填写任务信息", "sourceType": "源仓库类型", "selectContainerType": "选择容器仓库类型", + "valid": { + "srcRegionRequired": "请选择源仓库区域", + "accountRequired": "请输入 Amazon Web Services 账户 ID", + "accountFormatInvalid": "账户 ID 不合法" + }, "settings": { "source": { "title": "源仓库设置", @@ -388,7 +397,7 @@ "comps": { "credential": { "name": "凭证", - "create": "创建一个", + "create": "在 Secret Manager 中创建一个", "store": "Secret Key", "save1": "用于凭证. 选择 ", "save2": " 其他类型的密钥 ", diff --git a/source/portal/src/App.tsx b/source/portal/src/App.tsx index 7738a0e..3f720ce 100644 --- a/source/portal/src/App.tsx +++ b/source/portal/src/App.tsx @@ -51,7 +51,6 @@ export interface SignedInAppProps { // loading component for suspense fallback const Loader = () => { - // console.info("window.location:", JSON.parse(JSON.stringify(window.location))); return (
@@ -162,7 +161,6 @@ const OIDCAppRouter: React.FC = () => { const dispatch = useDispatch(); useEffect(() => { - // the `return` is important - addAccessTokenExpiring() returns a cleanup function return auth?.events?.addAccessTokenExpiring((event) => { console.info("addAccessTokenExpiring:event:", event); auth.signinSilent(); @@ -269,7 +267,6 @@ const App: React.FC = () => { }); setAuthType(configData.aws_appsync_authenticationType); if (configData.aws_appsync_authenticationType === AppSyncAuthType.OPEN_ID) { - // Amplify.configure(configData); const settings = { userStore: new WebStorageStateStore({ store: window.localStorage }), authority: configData.aws_oidc_provider, @@ -320,7 +317,6 @@ const App: React.FC = () => { document.title = t("title"); if (window.performance) { if (performance.navigation.type === 1) { - // console.info("This page is reloaded"); const timeStamp = new Date().getTime(); setLoadingConfig(true); Axios.get(`/aws-exports.json?timestamp=${timeStamp}`).then((res) => { @@ -342,7 +338,6 @@ const App: React.FC = () => { setLoadingConfig(false); }); } else { - // console.info("This page is not reloaded"); setLocalStorageAfterLoad(); } } else { diff --git a/source/portal/src/assets/config/const.ts b/source/portal/src/assets/config/const.ts index 8a7f4d0..3a7cb9b 100644 --- a/source/portal/src/assets/config/const.ts +++ b/source/portal/src/assets/config/const.ts @@ -16,14 +16,19 @@ export const REPORT_ISSUE_LINK = GITHUB_LINK + "/issues/new"; // URL to be done export const URL = ""; export const URL_FEEDBACK = GITHUB_LINK + "/issues/new"; -export const SSM_LINK_MAP: any = { - china: "https://console.amazonaws.cn/secretsmanager", - global: "https://console.aws.amazon.com/secretsmanager", + +export const buildSecretMangerLink = (region: string) => { + if (region.startsWith("cn")) { + return `https://${region}.console.amazonaws.cn/secretsmanager/listsecrets?region=${region}`; + } + return `https://${region}.console.aws.amazon.com/secretsmanager/listsecrets?region=${region}`; }; -export const CLOUD_WATCH_DASHBOARD_LINK_MAP: any = { - china: "https://console.amazonaws.cn/cloudwatch/home", - global: "https://console.aws.amazon.com/cloudwatch/home", +export const buildCloudWatchLink = (region: string) => { + if (region.startsWith("cn")) { + return `https://${region}.console.amazonaws.cn/cloudwatch/home`; + } + return `https://${region}.console.aws.amazon.com/cloudwatch/home`; }; export const SSM_PARASTORE_HELP_LINK_MAP: any = { @@ -134,6 +139,10 @@ export const S3_PARAMS_LIST_MAP: any = { en_name: "Source Secret Key for Credential in Secrets Manager", zh_name: "源数据凭证中的密钥名称名称", }, + isPayerRequest: { + en_name: "Enable Payer Request?", + zh_name: "使用申请方付款?", + }, destBucketName: { en_name: "Destination Bucket Name", zh_name: "目标数据桶名称", @@ -546,7 +555,6 @@ export const OBJECT_ACL_LIST = [ { name: "authenticated-read", value: "authenticated-read" }, { name: "aws-exec-read", value: "aws-exec-read" }, { name: "bucket-owner-read", value: "bucket-owner-read" }, - // { name: "bucket-owner-full-control", value: "bucket-owner-full-control" }, ]; export const S3_STORAGE_CLASS_OPTIONS = [ @@ -617,12 +625,6 @@ export const CREATE_USE_LESS_PROPERTY = [ "__typename", ]; -// export const fixedEncodeURIComponent = (str: string) => { -// return encodeURIComponent(str).replace(/[!'()*]/g, function (c) { -// return "%" + c.charCodeAt(0).toString(16); -// }); -// }; - export const urlIsValid = (url: string): boolean => { const REG = 
/https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,}/; @@ -635,7 +637,6 @@ export const emailIsValid = (email: string): boolean => { }; export const bucketNameIsValid = (bucketName: string): boolean => { - // return /^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(email); const REG1 = bucketName && /^[a-z\d.-]*$/.test(bucketName); const REG2 = bucketName && /^[a-z\d]/.test(bucketName); const REG3 = bucketName && !/-$/.test(bucketName); diff --git a/source/portal/src/assets/types/index.ts b/source/portal/src/assets/types/index.ts index caf1e84..bc47882 100644 --- a/source/portal/src/assets/types/index.ts +++ b/source/portal/src/assets/types/index.ts @@ -1,9 +1,6 @@ // Task Type Icons import ICON_S3 from "../images/icon-s3.png"; import ICON_ECR from "../images/icon-ecr.png"; -// import ICON_DYDB from "../images/icon-dydb.png"; -// import ICON_MONGO from "../images/icon-mongo.png"; -// import ICON_MYSQL from "../images/icon-mysql.png"; import STATUS_PENDING from "@material-ui/icons/Schedule"; import STATUS_PROGRESS from "@material-ui/icons/RemoveCircleOutline"; diff --git a/source/portal/src/assets/utils/request.ts b/source/portal/src/assets/utils/request.ts index e55124c..19b8503 100644 --- a/source/portal/src/assets/utils/request.ts +++ b/source/portal/src/assets/utils/request.ts @@ -98,7 +98,6 @@ export const appSyncRequestQuery = (query: any, params?: any): any => { return new Promise(async (resolve, reject) => { try { - // const result: any = await API.graphql(graphqlOperation(query, params)); const result: any = await client.query({ query: gql(query), variables: params, @@ -143,8 +142,6 @@ export const appSyncRequestMutation = (mutation: any, params?: any): any => { return new Promise(async (resolve, reject) => { try { - // const result: any = await API.graphql(graphqlOperation(query, params)); - // encode params string value const encodedParams = encodeParams(mutation, cloneDeep(params)); const result: any = await client.mutate({ mutation: gql(mutation), diff --git a/source/portal/src/common/Alert.tsx b/source/portal/src/common/Alert.tsx index c125cc2..c878745 100644 --- a/source/portal/src/common/Alert.tsx +++ b/source/portal/src/common/Alert.tsx @@ -13,12 +13,14 @@ interface AlertProps { content: string | JSX.Element; actions?: ReactElement; noMargin?: boolean; + width?: number; } const Alert: React.FC = (props: AlertProps) => { - const { type, title, content, actions, noMargin } = props; + const { type, title, content, actions, noMargin, width } = props; return (
{ {showLang ? (
    - {langList.map((item: any, index) => { + {langList.map((item: any) => { return (
  • diff --git a/source/portal/src/common/InfoBar.tsx b/source/portal/src/common/InfoBar.tsx index ea50944..2599701 100644 --- a/source/portal/src/common/InfoBar.tsx +++ b/source/portal/src/common/InfoBar.tsx @@ -1,5 +1,4 @@ import React from "react"; -// import { Link } from "react-router-dom"; import classNames from "classnames"; import { useDispatch, useMappedState } from "redux-react-hook"; import { useTranslation } from "react-i18next"; @@ -41,7 +40,7 @@ const mapState = (state: IState) => ({ }); interface InfoType { - page?: string | undefined; + page?: string; } const InfoBar: React.FC = (props: InfoType) => { diff --git a/source/portal/src/common/InfoSpan.tsx b/source/portal/src/common/InfoSpan.tsx index 74c1089..5364e59 100644 --- a/source/portal/src/common/InfoSpan.tsx +++ b/source/portal/src/common/InfoSpan.tsx @@ -5,10 +5,11 @@ import { ACTION_TYPE } from "assets/types"; interface spanInfo { spanType: string; + infoText?: string; } const InfoSpan: React.FC = (props) => { - const { spanType } = props; + const { spanType, infoText } = props; const { t } = useTranslation(); const dispatch = useDispatch(); const openInfoBar = React.useCallback(() => { @@ -18,7 +19,7 @@ const InfoSpan: React.FC = (props) => { }, [dispatch, spanType]); return ( - {t("info")} + {infoText || t("info")} ); }; diff --git a/source/portal/src/common/comp/form/DrhCredential.tsx b/source/portal/src/common/comp/form/DrhCredential.tsx index 8fd2811..eb5ec16 100644 --- a/source/portal/src/common/comp/form/DrhCredential.tsx +++ b/source/portal/src/common/comp/form/DrhCredential.tsx @@ -13,15 +13,11 @@ import NormalButton from "common/comp/NormalButton"; import { listSecrets } from "graphql/queries"; import DescLink from "common/comp/DescLink"; -import { - MenuProps, - SSM_LINK_MAP, - DRH_REGION_NAME, - DRH_REGION_TYPE_NAME, - GLOBAL_STR, -} from "assets/config/const"; +import { MenuProps, buildSecretMangerLink } from "assets/config/const"; import { EnumSpanType } from "common/InfoBar"; import { appSyncRequestQuery } from "assets/utils/request"; +import { IState } from "store/Store"; +import { useMappedState } from "redux-react-hook"; interface SelectMenuProp { hideBucket?: boolean; @@ -29,16 +25,17 @@ interface SelectMenuProp { onChange: any; } -const curRegionType: string = - localStorage.getItem(DRH_REGION_TYPE_NAME) || GLOBAL_STR; -const curRegion = localStorage.getItem(DRH_REGION_NAME) || ""; - const DrhCredential: React.FC = (props: SelectMenuProp) => { const { t } = useTranslation(); const { credentialValue, hideBucket, onChange } = props; const [ssmParamList, setSSMParamList] = useState([]); const [loadingData, setLoadingData] = useState(false); + const mapState = (state: IState) => ({ + amplifyConfig: state.amplifyConfig, + }); + const { amplifyConfig } = useMappedState(mapState); + async function getSSMParamsList() { try { setLoadingData(true); @@ -65,13 +62,16 @@ const DrhCredential: React.FC = (props: SelectMenuProp) => { <>
    {t("creation.step2ECR.settings.source.credentialsStore")}{" "} - +
    {t("creation.tips.store1")}{" "} @@ -100,9 +100,13 @@ const DrhCredential: React.FC = (props: SelectMenuProp) => { className="font14px credential-empty" value="" > - {ssmParamList.map((param: any, index: number) => { + {ssmParamList.map((param: any) => { return ( - + {param.name} ); diff --git a/source/portal/src/common/comp/form/DrhCron.tsx b/source/portal/src/common/comp/form/DrhCron.tsx index 3689515..f3c201c 100644 --- a/source/portal/src/common/comp/form/DrhCron.tsx +++ b/source/portal/src/common/comp/form/DrhCron.tsx @@ -64,7 +64,6 @@ const DrhCron: React.FC = (props: DrhCronProp) => { if (hasOneTime) { setCronTypeList(CRON_TYPE_LIST_WITH_ONE_TIME); } else { - // Reset fixed rate after change s3 event setCronTypeList(CRON_TYPE_LIST); setCronType(CRON_TYPE.FIXED_RATE); setCronUnitType(CRON_FIX_UNIT.HOURS); @@ -91,8 +90,8 @@ const DrhCron: React.FC = (props: DrhCronProp) => {
    {optionTitle}
    {optionDescHtml - ? optionDescHtml.map((element: any, index: number) => { - return {element}; + ? optionDescHtml.map((element: any) => { + return {element}; }) : optionDesc}
    @@ -106,13 +105,17 @@ const DrhCron: React.FC = (props: DrhCronProp) => { }} input={} > - {cronTypeList.map((option, index) => { + {cronTypeList.map((option) => { return isI18n ? ( - + ) : ( - + {option.name} ); @@ -157,14 +160,14 @@ const DrhCron: React.FC = (props: DrhCronProp) => { }} input={} > - {CRON_UNIT_LIST.map((option, index) => { + {CRON_UNIT_LIST.map((option) => { return isI18n ? ( - + ) : ( diff --git a/source/portal/src/common/comp/form/DrhSelect.tsx b/source/portal/src/common/comp/form/DrhSelect.tsx index 786a9ba..2f75e6d 100644 --- a/source/portal/src/common/comp/form/DrhSelect.tsx +++ b/source/portal/src/common/comp/form/DrhSelect.tsx @@ -42,8 +42,8 @@ const DrhSelect: React.FC = (props: SelectMenuProp) => {
    {optionDescHtml - ? optionDescHtml.map((element: any, index: number) => { - return {element}; + ? optionDescHtml.map((element: any) => { + return {element}; }) : optionDesc}
    @@ -54,13 +54,17 @@ const DrhSelect: React.FC = (props: SelectMenuProp) => { onChange={(event) => onChange(event)} input={} > - {optionList.map((option, index) => { + {optionList.map((option) => { return isI18n ? ( - + ) : ( - + {option.name} ); diff --git a/source/portal/src/common/info/CredentialInfo.tsx b/source/portal/src/common/info/CredentialInfo.tsx index d9a08c6..e2fe911 100644 --- a/source/portal/src/common/info/CredentialInfo.tsx +++ b/source/portal/src/common/info/CredentialInfo.tsx @@ -6,7 +6,7 @@ import FileCopyIcon from "@material-ui/icons/FileCopy"; import CopyToClipboard from "react-copy-to-clipboard"; import OpenInNewIcon from "@material-ui/icons/OpenInNew"; import { - SSM_LINK_MAP, + buildSecretMangerLink, SSM_PARASTORE_HELP_LINK_MAP, DRH_REGION_TYPE_NAME, DRH_REGION_NAME, @@ -47,12 +47,14 @@ const CredentialInfo: React.FC = () => { className="a-link" rel="noopener noreferrer" target="_blank" - href={SSM_LINK_MAP[curRegionType] + "?region=" + curRegion} + href={buildSecretMangerLink(curRegion)} > {t("comps.credential.store")} {" "} {t("comps.credential.save1")} - {t("comps.credential.save2")} + + {t("comps.credential.save2")} + {t("comps.credential.save3")}
{t("comps.credential.format")}
diff --git a/source/portal/src/index.scss b/source/portal/src/index.scss index c45f1b9..0362776 100644 --- a/source/portal/src/index.scss +++ b/source/portal/src/index.scss @@ -439,7 +439,10 @@ code { .monitor-chart-list { margin-top: 37px; background-color: #fff; + display: flex; + flex-wrap: wrap; .monitor-chart { + width: 50%; margin-top: 10px; padding: 10px 10px 20px; } @@ -534,7 +537,7 @@ $loading-color: #555; color: #000; } &-content { - padding-top: 2px; + padding-top: 4px; font-size: 14px; } } diff --git a/source/portal/src/pages/creation/StepOne.tsx b/source/portal/src/pages/creation/StepOne.tsx index 8f81782..46ba51d 100644 --- a/source/portal/src/pages/creation/StepOne.tsx +++ b/source/portal/src/pages/creation/StepOne.tsx @@ -146,14 +146,14 @@ const StepOne: React.FC = (props: any) => { {t("creation.step1.engineOptions")}
- {TYPE_LIST.map((item, index) => { + {TYPE_LIST.map((item) => { const optionClass = classNames({ "option-list-item": true, "hand-point": !item.disabled, active: taskType === item.value, }); return ( -
+
- {paramsList.map((element: any, index: any) => { + {paramsList.map((element: any) => { return ( ECR_PARAMS_LIST_MAP[element.ParameterKey] && (
{ECR_PARAMS_LIST_MAP[element.ParameterKey] && diff --git a/source/portal/src/pages/creation/ecr/StepTwoECR.tsx b/source/portal/src/pages/creation/ecr/StepTwoECR.tsx index 3cb2694..b041f09 100644 --- a/source/portal/src/pages/creation/ecr/StepTwoECR.tsx +++ b/source/portal/src/pages/creation/ecr/StepTwoECR.tsx @@ -49,6 +49,8 @@ const mapState = (state: IState) => ({ const MAX_LENGTH = 4096; +const ACCOUNT_REGEX = /^\d{12}$/; + const defaultTxtValue = "ubuntu:14.04,\namazon-linux:latest,\nmysql"; const defaultTxtValueSourceECR = "my-ecr-repo:ALL_TAGS,\nubuntu:14.04,\namazon-linux:latest,\nmysql"; @@ -71,6 +73,17 @@ const StepTwoECR: React.FC = () => { const [alramEmailRequireError, setAlramEmailRequireError] = useState(false); const [alarmEmailFormatError, setAlarmEmailFormatError] = useState(false); + const [sourceRegionRequiredError, setSourceRegionRequiredError] = + useState(false); + const [sourceAccountRequiredError, setSourceAccountRequiredError] = + useState(false); + const [sourceAccountInvalidError, setSourceAccountInvalidError] = + useState(false); + + const [destAccountRequiredError, setDestAccountRequiredError] = + useState(false); + const [destAccountInvalidError, setDestAccountInvalidError] = useState(false); + const [classSourceRegion, setClassSourceRegion] = useState("form-items"); const [classIsSourceInAccount, setClassIsSourceAccount] = useState("form-items"); @@ -150,6 +163,50 @@ const StepTwoECR: React.FC = () => { let errorCount = 0; if (paramsObj) { + // if the source type is ECR + if (paramsObj.sourceType === ECREnumSourceType.ECR) { + // Check Source Region Required + if (!paramsObj.srcRegion) { + errorCount++; + setSourceRegionRequiredError(true); + } + + // Check Source Account ID Required + if ( + paramsObj.sourceInAccount === YES_NO.NO && + !paramsObj.srcAccountId + ) { + errorCount++; + setSourceAccountRequiredError(true); + } + + // Check Source Account ID Valid + if ( + paramsObj.sourceInAccount === YES_NO.NO && + paramsObj.srcAccountId && + !ACCOUNT_REGEX.test(paramsObj.srcAccountId) + ) { + errorCount++; + setSourceAccountInvalidError(true); + } + } + + // Check Destination Account ID Required + if (paramsObj.destInAccount === YES_NO.NO && !paramsObj.destAccountId) { + errorCount++; + setDestAccountRequiredError(true); + } + + // Check Destination Account ID Valid + if ( + paramsObj.destInAccount === YES_NO.NO && + paramsObj.destAccountId && + !ACCOUNT_REGEX.test(paramsObj.destAccountId) + ) { + errorCount++; + setDestAccountInvalidError(true); + } + // Check Destination Region if (!paramsObj.destRegion) { errorCount++; @@ -373,13 +430,13 @@ const StepTwoECR: React.FC = () => {
{t("creation.step2ECR.selectContainerType")}
- {ECR_SOURCE_TYPE.map((item: any, index: any) => { + {ECR_SOURCE_TYPE.map((item: any) => { const stClass = classNames({ "st-item": true, active: sourceType === item.value, }); return ( -
+
@@ -490,13 +562,13 @@ const StepTwoECR: React.FC = () => { )}
- {DOCKER_IMAGE_TYPE.map((item: any, index: any) => { + {DOCKER_IMAGE_TYPE.map((item: any) => { const stClass = classNames({ "st-item": true, active: srcList === item.value, }); return ( -
+
- {paramShowList.map((element: any, index: any) => { + {paramShowList.map((element: any) => { return ( S3_PARAMS_LIST_MAP[element.ParameterKey] && (
{S3_PARAMS_LIST_MAP[element.ParameterKey] && diff --git a/source/portal/src/pages/creation/s3/StepTwoS3.tsx b/source/portal/src/pages/creation/s3/StepTwoS3.tsx index 8175eae..73a8149 100644 --- a/source/portal/src/pages/creation/s3/StepTwoS3.tsx +++ b/source/portal/src/pages/creation/s3/StepTwoS3.tsx @@ -2,7 +2,6 @@ import React, { useState, useEffect } from "react"; import { useHistory, useParams } from "react-router-dom"; import { useMappedState } from "redux-react-hook"; import { useTranslation } from "react-i18next"; -// import classNames from "classnames"; import Breadcrumbs from "@material-ui/core/Breadcrumbs"; import NavigateNextIcon from "@material-ui/icons/NavigateNext"; diff --git a/source/portal/src/pages/creation/s3/comps/DestSettings.tsx b/source/portal/src/pages/creation/s3/comps/DestSettings.tsx index 29be34d..796e500 100644 --- a/source/portal/src/pages/creation/s3/comps/DestSettings.tsx +++ b/source/portal/src/pages/creation/s3/comps/DestSettings.tsx @@ -227,6 +227,16 @@ const DestSettings: React.FC = (props) => { setDestPrefixFormatError(false); }, [tmpTaskInfo?.parametersObj?.sourceType]); + // Change dest region to null when src region changed. + useEffect(() => { + if ( + !tmpTaskInfo?.parametersObj?.srcRegionName || + tmpTaskInfo?.parametersObj?.srcRegionName.startsWith("us-gov-") + ) { + setDestRegionObj(null); + } + }, [tmpTaskInfo?.parametersObj?.srcRegionName]); + return (
@@ -282,7 +292,13 @@ const DestSettings: React.FC = (props) => {
{ + return element.value.startsWith("us-"); + }) + : AWS_REGION_LIST + } optionTitle={t("creation.step2.settings.dest.destRegionName")} optionDesc={t( "creation.step2.settings.dest.destRegionNameDesc" diff --git a/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx b/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx index a4911f7..7f4986e 100644 --- a/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx +++ b/source/portal/src/pages/creation/s3/comps/SourceSettings.tsx @@ -33,6 +33,7 @@ import { } from "assets/types/index"; import { IState } from "store/Store"; +import Alert from "common/Alert"; const mapState = (state: IState) => ({ tmpTaskInfo: state.tmpTaskInfo, }); @@ -117,6 +118,10 @@ const SourceSettings: React.FC = (props) => { tmpTaskInfo?.parametersObj?.srcPrefixsListFile || "" ); + const [isPayerRequest, setIsPayerRequest] = useState( + tmpTaskInfo?.parametersObj?.isPayerRequest || YES_NO.NO + ); + // Monitor the sourceType change useEffect(() => { // Hide Errors @@ -236,6 +241,9 @@ const SourceSettings: React.FC = (props) => { "srcCredentialsParameterStore", srcCredentialsParameterStore ); + if (!srcCredentialsParameterStore) { + setIsPayerRequest(YES_NO.NO); + } }, [srcCredentialsParameterStore]); useEffect(() => { @@ -273,6 +281,10 @@ const SourceSettings: React.FC = (props) => { } }, [srcPrefixType]); + useEffect(() => { + updateTmpTaskInfo("isPayerRequest", isPayerRequest); + }, [isPayerRequest]); + return (
@@ -410,28 +422,56 @@ const SourceSettings: React.FC = (props) => {
           )}
-          {showSrcRegion && (
-
-
+              ) => {
+                setIsPayerRequest(event.target.value);
+              }}
+              optionTitle={t("creation.step2.settings.source.payerRequest")}
               optionDesc={t(
-                "creation.step2.settings.source.srcRegionNameDesc"
+                "creation.step2.settings.source.payerRequestDesc"
               )}
-              showRequiredError={srcRegionReqired}
-              requiredErrorMsg={t("tips.error.srcRegionRequired")}
-              onChange={(
-                event: React.ChangeEvent,
-                data: IRegionType
-              ) => {
-                setSrcRegionReqired(false);
-                setSrcRegionObj(data);
-              }}
+              selectValue={isPayerRequest}
+              optionList={YES_NO_LIST}
             />
           )}
+          {showSrcRegion && (
+            <>
+
+                ,
+                  data: IRegionType
+                ) => {
+                  setSrcRegionReqired(false);
+                  setSrcRegionObj(data);
+                }}
+              />
+              {tmpTaskInfo?.parametersObj?.srcRegionName?.startsWith(
+                "us-gov-"
+              ) && (
+
+
+
+              )}
+
) => { diff --git a/source/portal/src/pages/detail/DetailS3.tsx b/source/portal/src/pages/detail/DetailS3.tsx index 1eabf44..80d2e5b 100644 --- a/source/portal/src/pages/detail/DetailS3.tsx +++ b/source/portal/src/pages/detail/DetailS3.tsx @@ -52,8 +52,6 @@ import { } from "assets/utils/request"; import Monitor from "./tabs/Monitor"; -// const S3_EVENT_OPTIONS_MAP = ConverListToMap(S3_EVENT_OPTIONS); - interface StyledTabProps { label: string; } diff --git a/source/portal/src/pages/detail/LogEvents.tsx b/source/portal/src/pages/detail/LogEvents.tsx index d27c821..e24b869 100644 --- a/source/portal/src/pages/detail/LogEvents.tsx +++ b/source/portal/src/pages/detail/LogEvents.tsx @@ -68,7 +68,6 @@ const LogEvents: React.FC = () => { setLogEventList((prev) => { return [...prev, ...resData.data.getLogEvents.logEvents]; }); - // setForwardToken(resData.data.getLogEvents.nextForwardToken); setBackwardToken(resData.data.getLogEvents.nextBackwardToken); } else { if (resData.data.getLogEvents.logEvents.length > 0) { @@ -79,7 +78,6 @@ const LogEvents: React.FC = () => { setLogEventList((prev) => { return [...resData.data.getLogEvents.logEvents, ...prev]; }); - // setForwardToken(resData.data.getLogEvents.nextForwardToken); setBackwardToken(resData.data.getLogEvents.nextBackwardToken); } } @@ -204,9 +202,9 @@ const LogEvents: React.FC = () => { ) : logEventList && logEventList.length > 0 ? ( - logEventList.map((element, index) => { + logEventList.map((element) => { return ( - + {formatLocalTime( parseInt(element.timestamp || "0") diff --git a/source/portal/src/pages/detail/tabs/Details.tsx b/source/portal/src/pages/detail/tabs/Details.tsx index cab824b..e879692 100644 --- a/source/portal/src/pages/detail/tabs/Details.tsx +++ b/source/portal/src/pages/detail/tabs/Details.tsx @@ -1,7 +1,5 @@ import React from "react"; -// import Moment from "react-moment"; import { useTranslation } from "react-i18next"; -// import {} from "utils" import { S3SourcePrefixType, diff --git a/source/portal/src/pages/detail/tabs/LogGroup.tsx b/source/portal/src/pages/detail/tabs/LogGroup.tsx index 72726e8..6f02e18 100644 --- a/source/portal/src/pages/detail/tabs/LogGroup.tsx +++ b/source/portal/src/pages/detail/tabs/LogGroup.tsx @@ -10,7 +10,7 @@ import DataLoading from "common/Loading"; import { Pagination } from "@material-ui/lab"; import { Link } from "react-router-dom"; import { - CLOUD_WATCH_DASHBOARD_LINK_MAP, + buildCloudWatchLink, FINDER_DESC, LOGTYPE_FINDER, LOGTYPE_WORKER, @@ -30,14 +30,7 @@ interface LogGroupProps { const PAGE_SIZE = 20; const LogGroup: React.FC = (props: LogGroupProps) => { - const { - taskId, - curTaskInfo, - groupType, - logGroupName, - curRegionType, - curRegion, - } = props; + const { taskId, curTaskInfo, groupType, logGroupName, curRegion } = props; const { t } = useTranslation(); const [logStreams, setLogStreams] = useState([]); const [loadingData, setLoadingData] = useState(false); @@ -94,9 +87,9 @@ const LogGroup: React.FC = (props: LogGroupProps) => { className="a-link" rel="noopener noreferrer" target="_blank" - href={`${ - CLOUD_WATCH_DASHBOARD_LINK_MAP[curRegionType] - }?region=${curRegion}#logStream:group=${getOutputValueByDesc( + href={`${buildCloudWatchLink( + curRegion + )}?region=${curRegion}#logStream:group=${getOutputValueByDesc( FINDER_DESC, curTaskInfo )}`} @@ -122,9 +115,9 @@ const LogGroup: React.FC = (props: LogGroupProps) => { className="a-link" rel="noopener noreferrer" target="_blank" - href={`${ - CLOUD_WATCH_DASHBOARD_LINK_MAP[curRegionType] - 
}?region=${curRegion}#logStream:group=${getOutputValueByDesc( + href={`${buildCloudWatchLink( + curRegion + )}?region=${curRegion}#logStream:group=${getOutputValueByDesc( WORKER_DESC, curTaskInfo )}`} @@ -212,9 +205,9 @@ const LogGroup: React.FC = (props: LogGroupProps) => { ) : logStreams && logStreams.length > 0 ? ( - logStreams.map((element, index) => { + logStreams.map((element) => { return ( - + { switch (period) { @@ -99,9 +98,7 @@ const Monitor: React.FC = (props: MonitorProps) => { const goToCloudWatch = () => { window.open( - `${ - CLOUD_WATCH_DASHBOARD_LINK_MAP[curRegionType] - }?region=${curRegion}#dashboards:name=${ + `${buildCloudWatchLink(curRegion)}?region=${curRegion}#dashboards:name=${ curTaskInfo?.stackId?.split("/")[1] }-Dashboard-${curRegion}` ); @@ -119,10 +116,10 @@ const Monitor: React.FC = (props: MonitorProps) => {
-          {METRICS_TAB_LIST.map((element, index) => {
+          {METRICS_TAB_LIST.map((element) => {
             return (
= (props: MonitorProps) => { )}
-            {SPECIFY_TIME_ITEMS.map((element, index) => {
+            {SPECIFY_TIME_ITEMS.map((element) => {
               return (
 = (props: MonitorProps) => {
{monitorTab === MonitorTabType.METRICS && (
- - - - - - - - - -
-
- -
-
-
- -
-
-
- -
-
-
- -
-
+
+ +
+ +
+ +
+ +
+ +
+ +
+ +
               )}
               {monitorTab === MonitorTabType.FINDER && (
diff --git a/source/portal/src/pages/detail/tabs/MonitorCharts.tsx b/source/portal/src/pages/detail/tabs/MonitorCharts.tsx
index 5aced9e..8feb770 100644
--- a/source/portal/src/pages/detail/tabs/MonitorCharts.tsx
+++ b/source/portal/src/pages/detail/tabs/MonitorCharts.tsx
@@ -24,6 +24,7 @@ const MonitorCharts: React.FC = (
   const chartDefaultOptions: ApexOptions = {
     chart: {
       id: graphName,
+      redrawOnParentResize: true,
       width: "100%",
       height: 200,
       type: "line",
@@ -34,7 +35,7 @@ const MonitorCharts: React.FC = (
         enabled: false,
       },
     },
-    colors: ["#0073bb", "#ec7211"],
+    colors: ["#0073bb", "#ec7211", "#2ca02c", "#d62728"],
     grid: {
       padding: {
         top: 20,
@@ -107,8 +108,7 @@ const MonitorCharts: React.FC = (
     },
     noData: {
-      text: `No data available.
-        Try adjusting the dashboard time range.`,
+      text: `No data available.`,
       align: "center",
       verticalAlign: "middle",
       style: {
@@ -139,6 +139,7 @@ const MonitorCharts: React.FC = (
       tickAmount: 10,
       categories: [startTime * 1000, endTime * 1000],
       labels: {
+        datetimeUTC: false,
         datetimeFormatter: {
           year: "yyyy",
           month: "yyyy-MM",
@@ -221,12 +222,14 @@ const MonitorCharts: React.FC = (
   return (
- {loadingData && ( -
- -
- )} - +
+ {loadingData && ( +
+ +
+ )} + +
   );
 };
diff --git a/source/portal/src/pages/home/Home.tsx b/source/portal/src/pages/home/Home.tsx
index eab6045..4fd232c 100644
--- a/source/portal/src/pages/home/Home.tsx
+++ b/source/portal/src/pages/home/Home.tsx
@@ -33,7 +33,6 @@ const mapState = (state: IState) => ({
 const Home: React.FC = () => {
   const { t, i18n } = useTranslation();
   const [titleStr, setTitleStr] = useState("en_title");
-  // const [subTitleStr, setSubTitleStr] = useState("en_subTitle");
   const [nameStr, setNameStr] = useState("en_name");
   const [descStr, setDescStr] = useState("en_desc");
   const [contentStr, setContentStr] = useState("en_content");
@@ -41,7 +40,6 @@ const Home: React.FC = () => {
   useEffect(() => {
     if (CUR_SUPPORT_LANGS.indexOf(i18n.language) >= 0) {
       setTitleStr(i18n.language + "_title");
-      // setSubTitleStr(i18n.language + "_subTitle");
       setNameStr(i18n.language + "_name");
       setDescStr(i18n.language + "_desc");
       setContentStr(i18n.language + "_content");
@@ -94,7 +92,7 @@ const Home: React.FC = () => {
           {howItWorks.list.map((element: any, index: any) => {
             return (
-
+
{index + 1}
{element[nameStr]}
diff --git a/source/portal/src/pages/home/comps/Card.tsx b/source/portal/src/pages/home/comps/Card.tsx
index 18b5ee8..f667300 100644
--- a/source/portal/src/pages/home/comps/Card.tsx
+++ b/source/portal/src/pages/home/comps/Card.tsx
@@ -26,9 +26,9 @@ const Card: React.FC = (props: any) => {
{contentInfo[titleStr]}
    - {contentInfo.list.map((element: any, index: any) => { + {contentInfo.list.map((element: any) => { return ( -
  • +
  • {element[nameStr]} diff --git a/source/portal/src/pages/list/TaskList.tsx b/source/portal/src/pages/list/TaskList.tsx index b77d895..4d4f050 100644 --- a/source/portal/src/pages/list/TaskList.tsx +++ b/source/portal/src/pages/list/TaskList.tsx @@ -355,6 +355,11 @@ const List: React.FC = () => { element.ParameterValue === "true" ? YES_NO.YES : YES_NO.NO; } + if (element.ParameterKey === "isPayerRequest") { + tmpTaskInfo.parametersObj.isPayerRequest = + element.ParameterValue === "true" ? YES_NO.YES : YES_NO.NO; + } + if (element.ParameterKey === "srcCredentials") { tmpTaskInfo.parametersObj.srcCredentialsParameterStore = element.ParameterValue; @@ -804,7 +809,7 @@ const List: React.FC = () => { {t("taskList.table.createdTime")}
-                {taskListData.map((element: any, index: any) => {
+                {taskListData.map((element: any) => {
                   const rowClass = classNames({
                     "table-row": true,
                     active:
@@ -816,7 +821,7 @@ const List: React.FC = () => {
                       clickTaskInfo(element);
                     }}
                     data-uuid={element.id}
-                    key={index}
+                    key={element.id}
                     className={rowClass}
                   >
diff --git a/source/portal/src/store/Store.ts b/source/portal/src/store/Store.ts
index e84e190..f3b04ef 100644
--- a/source/portal/src/store/Store.ts
+++ b/source/portal/src/store/Store.ts
@@ -45,6 +45,7 @@ export interface S3_EC2_TASK {
     multipartThreshold?: string;
     chunkSize?: string;
     maxThreads?: string;
+    isPayerRequest?: string;
   };
   [key: string]: any;
 }